From f9fe06cdaf77f7e88137b3ae0b41bc6acadff04a Mon Sep 17 00:00:00 2001
From: Yoshi Automation
Date: Wed, 30 Oct 2024 20:33:46 +0000
Subject: [PATCH 01/18] chore: update docs/dyn/index.md

---
docs/dyn/index.md | 1 +
1 file changed, 1 insertion(+)

diff --git a/docs/dyn/index.md b/docs/dyn/index.md
index c561c8c7ae..0b9e336946 100644
--- a/docs/dyn/index.md
+++ b/docs/dyn/index.md
@@ -919,6 +919,7 @@
* [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/osconfig_v1.html)
* [v1alpha](http://googleapis.github.io/google-api-python-client/docs/dyn/osconfig_v1alpha.html)
* [v1beta](http://googleapis.github.io/google-api-python-client/docs/dyn/osconfig_v1beta.html)
+* [v2beta](http://googleapis.github.io/google-api-python-client/docs/dyn/osconfig_v2beta.html)

## oslogin

From b153244e0f9a4f5c4728baff9d8d98156e2c030c Mon Sep 17 00:00:00 2001
From: Yoshi Automation
Date: Wed, 30 Oct 2024 20:33:46 +0000
Subject: [PATCH 02/18] feat(aiplatform): update the api

#### aiplatform:v1

The following keys were deleted:
- schemas.GoogleCloudAiplatformV1CustomJobSpec.properties.pscInterfaceConfig.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1PscInterfaceConfig (Total Keys: 2)

The following keys were added:
- resources.projects.resources.locations.resources.notebookRuntimes.methods.stop (Total Keys: 12)
- schemas.GoogleCloudAiplatformV1ClientConnectionConfig (Total Keys: 4)
- schemas.GoogleCloudAiplatformV1DeployedModel.properties.systemLabels (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1Endpoint.properties.clientConnectionConfig.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1FeatureView.properties.optimizedConfig.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1FeatureViewOptimizedConfig (Total Keys: 3)
- schemas.GoogleCloudAiplatformV1GroundingChunkRetrievedContext.properties.text.type (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt.properties.infillPrefix.type (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt.properties.infillSuffix.type (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt.properties.predictionInputs (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1StopNotebookRuntimeRequest (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1VertexRagStore.properties.similarityTopK.deprecated (Total Keys: 1)

#### aiplatform:v1beta1

The following keys were added:
- resources.projects.resources.locations.resources.endpoints.methods.update (Total Keys: 12)
- resources.projects.resources.locations.resources.notebookRuntimes.methods.stop (Total Keys: 12)
- schemas.GoogleCloudAiplatformV1beta1ClientConnectionConfig (Total Keys: 4)
- schemas.GoogleCloudAiplatformV1beta1CodeExecutionResult (Total Keys: 4)
- schemas.GoogleCloudAiplatformV1beta1DeployedModel.properties.systemLabels (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1beta1Endpoint.properties.clientConnectionConfig.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1ExecutableCode (Total Keys: 4)
- schemas.GoogleCloudAiplatformV1beta1GroundingChunkRetrievedContext.properties.text.type (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1ImportRagFilesConfig.properties.partialFailureBigquerySink.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1ImportRagFilesConfig.properties.partialFailureGcsSink.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1ImportRagFilesConfig.properties.ragFileChunkingConfig.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1Part.properties.codeExecutionResult.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1Part.properties.executableCode.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfig.properties.defaultRuntime.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigDefaultRuntime (Total Keys: 3)
- schemas.GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigPersistentResourceRuntimeDetail (Total Keys: 6)
- schemas.GoogleCloudAiplatformV1beta1RagCorpus.properties.ragEmbeddingModelConfig.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RagCorpus.properties.ragVectorDbConfig.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RagFileChunkingConfig.properties.chunkOverlap.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RagFileChunkingConfig.properties.chunkSize.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RagFileParsingConfig.properties.useAdvancedPdfParsing.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RagQuery.properties.ranking.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RagQuery.properties.similarityTopK.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1RaySpec.properties.nfsMounts (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1beta1SchemaPromptSpecStructuredPrompt.properties.infillPrefix.type (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1SchemaPromptSpecStructuredPrompt.properties.infillSuffix.type (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1SchemaPromptSpecStructuredPrompt.properties.predictionInputs (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1beta1StopNotebookRuntimeRequest (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1beta1Tool.properties.codeExecution.$ref (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1ToolCodeExecution (Total Keys: 2)
- schemas.GoogleCloudAiplatformV1beta1UpdateEndpointLongRunningRequest (Total Keys: 3)
- schemas.GoogleCloudAiplatformV1beta1UploadRagFileConfig.properties.ragFileChunkingConfig.deprecated (Total Keys: 1)
- schemas.GoogleCloudAiplatformV1beta1VertexRagStore.properties.similarityTopK.deprecated (Total Keys: 1)

---
docs/dyn/aiplatform_v1.endpoints.html | 2 +
...form_v1.projects.locations.customJobs.html | 8 -
...cts.locations.deploymentResourcePools.html | 3 +
...tform_v1.projects.locations.endpoints.html | 38 ++
...ions.featureOnlineStores.featureViews.html | 26 +-
...ts.locations.hyperparameterTuningJobs.html | 8 -
...latform_v1.projects.locations.nasJobs.html | 16 -
...1.projects.locations.notebookRuntimes.html | 44 +++
....projects.locations.publishers.models.html | 2 +
docs/dyn/aiplatform_v1.publishers.models.html | 6 +-
docs/dyn/aiplatform_v1beta1.endpoints.html | 80 ++++
docs/dyn/aiplatform_v1beta1.media.html | 4 +-
...ta1.projects.locations.cachedContents.html | 108 ++++++
...v1beta1.projects.locations.customJobs.html | 8 +-
...cts.locations.deploymentResourcePools.html | 3 +
..._v1beta1.projects.locations.endpoints.html | 366 ++++++++++++++++++
...v1beta1.projects.locations.extensions.html | 26 +-
...ions.featureOnlineStores.featureViews.html | 2 +-
...aiplatform_v1beta1.projects.locations.html | 24 ++
...ts.locations.hyperparameterTuningJobs.html | 8 +-
...rm_v1beta1.projects.locations.nasJobs.html | 16 +-
...1.projects.locations.notebookRuntimes.html | 44 +++
...rojects.locations.persistentResources.html | 36 +-
...beta1.projects.locations.pipelineJobs.html | 36 +-
....projects.locations.publishers.models.html | 80 ++++
...rojects.locations.ragCorpora.ragFiles.html | 10 +-
...1.projects.locations.reasoningEngines.html | 8 +-
..._v1beta1.projects.locations.schedules.html | 54 ++-
...v1beta1.projects.locations.tuningJobs.html | 80 ++++
.../aiplatform_v1beta1.publishers.models.html | 88 ++++-
.../documents/aiplatform.v1.json | 108 +++++-
.../documents/aiplatform.v1beta1.json | 265 ++++++++++++-
32 files changed, 1499 insertions(+), 108 deletions(-)

diff --git a/docs/dyn/aiplatform_v1.endpoints.html b/docs/dyn/aiplatform_v1.endpoints.html
index dda9ec2055..dd47308d62 100644
--- a/docs/dyn/aiplatform_v1.endpoints.html
+++ b/docs/dyn/aiplatform_v1.endpoints.html
@@ -754,6 +754,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -1172,6 +1173,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/docs/dyn/aiplatform_v1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1.projects.locations.customJobs.html index 25f9ae5f3b..83c11d6f09 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.customJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.customJobs.html @@ -169,8 +169,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -296,8 +294,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -465,8 +461,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -605,8 +599,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html b/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html index 5837046c7d..8304db3355 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html +++ b/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html @@ -599,6 +599,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. diff --git a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html index 3de4931bbe..7cc3be7b62 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html @@ -496,6 +496,9 @@

Method Details

The object takes the form of: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -653,6 +656,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -918,6 +924,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, "trafficSplit": { # A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's traffic_split will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's traffic_split is not updated. "a_key": 42, @@ -1639,6 +1648,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -1746,6 +1756,9 @@

Method Details

An object of the form: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -1903,6 +1916,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -1963,6 +1979,9 @@

Method Details

{ # Response message for EndpointService.ListEndpoints. "endpoints": [ # List of Endpoints in the requested page. { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2120,6 +2139,9 @@
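The ListEndpoints response above now carries `clientConnectionConfig`. A short sketch of paging through endpoints and printing the configured `inferenceTimeout`; the parent resource and regional endpoint are placeholders.

```python
from googleapiclient.discovery import build

# Placeholder: substitute your own project and region.
PARENT = "projects/my-project/locations/us-central1"

service = build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

endpoints = service.projects().locations().endpoints()
request = endpoints.list(parent=PARENT)
while request is not None:
    page = request.execute()
    for endpoint in page.get("endpoints", []):
        cfg = endpoint.get("clientConnectionConfig", {})
        # inferenceTimeout is a Duration string such as "30s" when it has been set.
        print(endpoint.get("name"), cfg.get("inferenceTimeout"))
    request = endpoints.list_next(request, page)
```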

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -2338,6 +2360,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, "updateMask": "A String", # Required. The update mask applies to the resource. See google.protobuf.FieldMask. } @@ -2381,6 +2406,9 @@

Method Details

The object takes the form of: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2538,6 +2566,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -2585,6 +2616,9 @@

Method Details

An object of the form: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2742,6 +2776,9 @@
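The `endpoints.patch` request and response documented above accept the new `clientConnectionConfig`. A hedged sketch of setting a 60-second inference timeout follows; the endpoint name is a placeholder, and the camelCase `updateMask` path is an assumption about how the field mask is spelled.

```python
from googleapiclient.discovery import build

# Placeholder: substitute your own endpoint resource name.
ENDPOINT = "projects/my-project/locations/us-central1/endpoints/1234567890"

service = build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

updated = (
    service.projects()
    .locations()
    .endpoints()
    .patch(
        name=ENDPOINT,
        updateMask="clientConnectionConfig",  # assumed camelCase field-mask path
        body={
            # Duration fields use the JSON string form, e.g. 60 seconds.
            "clientConnectionConfig": {"inferenceTimeout": "60s"},
        },
    )
    .execute()
)
print(updated.get("clientConnectionConfig"))
```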

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -3370,6 +3407,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html b/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html index 2691f612b5..0ba2151aeb 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html +++ b/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html @@ -176,6 +176,12 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` + "optimizedConfig": { # Configuration for FeatureViews created in Optimized FeatureOnlineStore. # Optional. Configuration for FeatureView created under Optimized FeatureOnlineStore. + "automaticResources": { # A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. # Optional. A description of resources that the FeatureView uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2. If max_replica_count is not set, the default value is 6. The max allowed replica count is 1000. + "maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. + "minReplicaCount": 42, # Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "syncConfig": { # Configuration for Sync. Only one option is set. # Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving. @@ -396,6 +402,12 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` + "optimizedConfig": { # Configuration for FeatureViews created in Optimized FeatureOnlineStore. # Optional. Configuration for FeatureView created under Optimized FeatureOnlineStore. + "automaticResources": { # A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. # Optional. A description of resources that the FeatureView uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2. If max_replica_count is not set, the default value is 6. The max allowed replica count is 1000. + "maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. + "minReplicaCount": 42, # Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "syncConfig": { # Configuration for Sync. Only one option is set. # Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving. @@ -503,6 +515,12 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` + "optimizedConfig": { # Configuration for FeatureViews created in Optimized FeatureOnlineStore. # Optional. Configuration for FeatureView created under Optimized FeatureOnlineStore. + "automaticResources": { # A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. # Optional. A description of resources that the FeatureView uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2. If max_replica_count is not set, the default value is 6. The max allowed replica count is 1000. + "maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. + "minReplicaCount": 42, # Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "syncConfig": { # Configuration for Sync. Only one option is set. # Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving. @@ -581,6 +599,12 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` + "optimizedConfig": { # Configuration for FeatureViews created in Optimized FeatureOnlineStore. # Optional. Configuration for FeatureView created under Optimized FeatureOnlineStore. + "automaticResources": { # A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. # Optional. A description of resources that the FeatureView uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2. If max_replica_count is not set, the default value is 6. The max allowed replica count is 1000. + "maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. + "minReplicaCount": 42, # Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "syncConfig": { # Configuration for Sync. Only one option is set. # Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving. @@ -594,7 +618,7 @@

Method Details

}, } - updateMask: string, Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron` + updateMask: string, Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron` * `optimized_config.automatic_resources` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html index d869d3aaf5..e8bec69801 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html @@ -272,8 +272,6 @@
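The updated `updateMask` documentation above lists `optimized_config.automatic_resources` as an updatable FeatureView field. A sketch of patching it with the dynamic client follows; the FeatureView name and replica counts are placeholders, and the call is assumed to return a long-running operation as `featureViews.patch` does.

```python
from googleapiclient.discovery import build

# Placeholder: substitute your own FeatureView resource name.
FEATURE_VIEW = (
    "projects/my-project/locations/us-central1/"
    "featureOnlineStores/my-store/featureViews/my-view"
)

service = build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

operation = (
    service.projects()
    .locations()
    .featureOnlineStores()
    .featureViews()
    .patch(
        name=FEATURE_VIEW,
        updateMask="optimized_config.automatic_resources",
        body={
            "optimizedConfig": {
                # Documented defaults are 2 and 6; 1000 is the stated maximum.
                "automaticResources": {"minReplicaCount": 2, "maxReplicaCount": 10},
            },
        },
    )
    .execute()
)
print(operation.get("name"))  # featureViews.patch returns a long-running operation
```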

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -534,8 +532,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -838,8 +834,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1113,8 +1107,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html index 93e48fd40b..c0e6fa28ff 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html @@ -225,8 +225,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -318,8 +316,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -499,8 +495,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -592,8 +586,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -815,8 +807,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -908,8 +898,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1102,8 +1090,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1195,8 +1181,6 @@

Method Details

"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimes.html b/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimes.html index 9da27993b6..3ebb925e22 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimes.html +++ b/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimes.html @@ -100,6 +100,9 @@

Instance Methods

start(name, body=None, x__xgafv=None)

Starts a NotebookRuntime.

+  stop(name, body=None, x__xgafv=None)
+
+    Stops a NotebookRuntime.

upgrade(name, body=None, x__xgafv=None)

Upgrades a NotebookRuntime.

@@ -386,6 +389,47 @@

Method Details

  }

+  stop(name, body=None, x__xgafv=None)
+
+    Stops a NotebookRuntime.
+
+Args:
+  name: string, Required. The name of the NotebookRuntime resource to be stopped. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for NotebookService.StopNotebookRuntime.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
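For orientation, a minimal sketch of calling this new method with the dynamic client follows; the project, location and runtime IDs are placeholders, and the empty body matches the StopNotebookRuntimeRequest message documented above:

    from googleapiclient import discovery

    aiplatform = discovery.build("aiplatform", "v1")

    # Placeholder resource name; the NotebookRuntime must already exist.
    name = "projects/my-project/locations/us-central1/notebookRuntimes/my-runtime"

    # StopNotebookRuntimeRequest has no fields, so an empty body is enough.
    operation = (
        aiplatform.projects()
        .locations()
        .notebookRuntimes()
        .stop(name=name, body={})
        .execute()
    )

    # The call returns a long-running Operation; poll `done` and inspect
    # `response` or `error` once it completes.
    print(operation["name"], operation.get("done", False))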
+
+
upgrade(name, body=None, x__xgafv=None)
Upgrades a NotebookRuntime.
diff --git a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html
index 23e3f3ca69..f2e0057a0e 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html
@@ -766,6 +766,7 @@ 

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -1448,6 +1449,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/docs/dyn/aiplatform_v1.publishers.models.html b/docs/dyn/aiplatform_v1.publishers.models.html index 872cda658c..7c62ad2de2 100644 --- a/docs/dyn/aiplatform_v1.publishers.models.html +++ b/docs/dyn/aiplatform_v1.publishers.models.html @@ -757,6 +757,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -978,7 +979,7 @@

Method Details

"spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). }, "deployMetadata": { # Metadata information about the deployment for managing deployment config. # Optional. Metadata information about this deployment config. - "labels": { # Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc. + "labels": { # Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc. "a_key": "A String", }, "sampleRequest": "A String", # Optional. Sample request for deployed endpoint. @@ -1077,7 +1078,7 @@

Method Details

"spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). }, "deployMetadata": { # Metadata information about the deployment for managing deployment config. # Optional. Metadata information about this deployment config. - "labels": { # Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc. + "labels": { # Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc. "a_key": "A String", }, "sampleRequest": "A String", # Optional. Sample request for deployed endpoint. @@ -1568,6 +1569,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/docs/dyn/aiplatform_v1beta1.endpoints.html b/docs/dyn/aiplatform_v1beta1.endpoints.html index 1325020fa6..42588d3602 100644 --- a/docs/dyn/aiplatform_v1beta1.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.endpoints.html @@ -109,6 +109,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -182,6 +190,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -278,6 +294,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -309,6 +333,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -443,6 +469,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -545,6 +579,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -584,6 +626,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -720,6 +764,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -755,6 +807,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -866,6 +919,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -968,6 +1029,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1007,6 +1076,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -1143,6 +1214,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1178,6 +1257,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/docs/dyn/aiplatform_v1beta1.media.html b/docs/dyn/aiplatform_v1beta1.media.html index c163ff9d43..54ff4e6970 100644 --- a/docs/dyn/aiplatform_v1beta1.media.html +++ b/docs/dyn/aiplatform_v1beta1.media.html @@ -122,7 +122,7 @@

Method Details

"jiraSource": { # The Jira source for the ImportRagFilesRequest. # The RagFile is imported from a Jira query. "jiraQueries": [ # Required. The Jira queries. { # JiraQueries contains the Jira queries and corresponding authentication. - "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). + "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ @@ -228,7 +228,7 @@

Method Details

"jiraSource": { # The Jira source for the ImportRagFilesRequest. # The RagFile is imported from a Jira query. "jiraQueries": [ # Required. The Jira queries. { # JiraQueries contains the Jira queries and corresponding authentication. - "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). + "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html b/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html index 9c15e089de..153f9db6c9 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.cachedContents.html @@ -115,6 +115,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -153,6 +161,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input only. Immutable. Developer set system instruction. Currently, text only "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -192,6 +208,8 @@

Method Details

}, "tools": [ # Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -319,6 +337,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -357,6 +383,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input only. Immutable. Developer set system instruction. Currently, text only "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -396,6 +430,8 @@

Method Details

}, "tools": [ # Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -548,6 +584,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -586,6 +630,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input only. Immutable. Developer set system instruction. Currently, text only "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -625,6 +677,8 @@

Method Details

}, "tools": [ # Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -763,6 +817,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -801,6 +863,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input only. Immutable. Developer set system instruction. Currently, text only "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -840,6 +910,8 @@

Method Details

}, "tools": [ # Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -986,6 +1058,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1024,6 +1104,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input only. Immutable. Developer set system instruction. Currently, text only "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1063,6 +1151,8 @@

Method Details

}, "tools": [ # Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -1191,6 +1281,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1229,6 +1327,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input only. Immutable. Developer set system instruction. Currently, text only "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1268,6 +1374,8 @@

Method Details

}, "tools": [ # Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html index b2b036e047..25dda74a23 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html @@ -170,7 +170,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -298,7 +298,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -468,7 +468,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -609,7 +609,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.deploymentResourcePools.html b/docs/dyn/aiplatform_v1beta1.projects.locations.deploymentResourcePools.html index fcd0e1f521..e04c659457 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.deploymentResourcePools.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.deploymentResourcePools.html @@ -604,6 +604,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html index e3e90b128f..c01719e305 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html @@ -156,6 +156,9 @@

Instance Methods

undeployModel(endpoint, body=None, x__xgafv=None)

Undeploys a Model from an Endpoint, removing a DeployedModel from it, and freeing all resources it's using.

+

+ update(name, body=None, x__xgafv=None)

+

+ Updates an Endpoint with a long running operation.
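A hedged sketch of the new long-running `update` call follows. The request wrapper field (`endpoint`) is an assumption based on the `UpdateEndpointLongRunningRequest` schema this patch introduces, and the endpoint name and display name are placeholders; the returned operation is polled through the generic operations resource.

```python
import time
from googleapiclient import discovery

service = discovery.build(
    "aiplatform", "v1beta1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

name = "projects/my-project/locations/us-central1/endpoints/1234567890"  # placeholder
# Assumed request shape: the Endpoint to update, wrapped in the long-running request.
body = {"endpoint": {"name": name, "displayName": "renamed-endpoint"}}

operation = service.projects().locations().endpoints().update(name=name, body=body).execute()

# Poll the long-running operation until it reports completion.
while not operation.get("done"):
    time.sleep(10)
    operation = (
        service.projects().locations().operations()
        .get(name=operation["name"])
        .execute()
    )
```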

Method Details

close() @@ -176,6 +179,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -249,6 +260,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -345,6 +364,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -376,6 +403,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -505,6 +534,9 @@

Method Details

The object takes the form of: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -667,6 +699,9 @@
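The new `clientConnectionConfig.inferenceTimeout` field lets an endpoint carry its own online-prediction timeout. A minimal sketch of creating such an endpoint follows; the parent, display name, and the Duration-style `"60s"` value are assumptions (the schema above documents the field only as a string), and the create call returns a long-running operation that is not polled here.

```python
from googleapiclient import discovery

service = discovery.build(
    "aiplatform", "v1beta1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

parent = "projects/my-project/locations/us-central1"  # placeholder
endpoint = {
    "displayName": "timeout-tuned-endpoint",
    "clientConnectionConfig": {
        # Assumed Duration-style string; the schema documents the field only as "A String".
        "inferenceTimeout": "60s",
    },
}
operation = (
    service.projects().locations().endpoints()
    .create(parent=parent, body=endpoint)
    .execute()
)
```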

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -938,6 +973,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, "trafficSplit": { # A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's traffic_split will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's traffic_split is not updated. "a_key": 42, @@ -1476,6 +1514,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1578,6 +1624,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1617,6 +1671,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -1753,6 +1809,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1788,6 +1852,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -1899,6 +1964,9 @@

Method Details

An object of the form: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2061,6 +2129,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -2156,6 +2227,9 @@

Method Details

{ # Response message for EndpointService.ListEndpoints. "endpoints": [ # List of Endpoints in the requested page. { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2318,6 +2392,9 @@
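The ListEndpoints response above is paginated through `nextPageToken`, which the dynamic client wraps with `list`/`list_next`. A sketch with a placeholder project and location, also reading the new per-endpoint `clientConnectionConfig`:

```python
from googleapiclient import discovery

service = discovery.build(
    "aiplatform", "v1beta1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

parent = "projects/my-project/locations/us-central1"  # placeholder
request = service.projects().locations().endpoints().list(parent=parent)
while request is not None:
    response = request.execute()
    for ep in response.get("endpoints", []):
        timeout = ep.get("clientConnectionConfig", {}).get("inferenceTimeout")
        print(ep.get("name"), ep.get("displayName"), timeout)
    # list_next returns None once nextPageToken is absent.
    request = service.projects().locations().endpoints().list_next(
        previous_request=request, previous_response=response
    )
```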

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -2542,6 +2619,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, "updateMask": "A String", # Required. The update mask applies to the resource. See google.protobuf.FieldMask. } @@ -2585,6 +2665,9 @@

Method Details

The object takes the form of: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2747,6 +2830,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -2795,6 +2881,9 @@

Method Details

An object of the form: { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction. + "inferenceTimeout": "A String", # Customizable online prediction request timeout. + }, "createTime": "A String", # Output only. Timestamp when this Endpoint was created. "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon. @@ -2957,6 +3046,9 @@

Method Details

}, "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + "a_key": "A String", + }, }, ], "description": "A String", # The description of the Endpoint. @@ -3333,6 +3425,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -3435,6 +3535,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -3474,6 +3582,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -3610,6 +3720,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -3645,6 +3763,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -3848,4 +3967,251 @@

Method Details

}
+
+ update(name, body=None, x__xgafv=None) +
Updates an Endpoint with a long running operation.
+
+Args:
+  name: string, Output only. The resource name of the Endpoint. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for EndpointService.UpdateEndpointLongRunning.
+  "endpoint": { # Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. # Required. The Endpoint which replaces the resource on the server. Currently we only support updating the `client_connection_config` field, all the other fields' update will be blocked.
+    "clientConnectionConfig": { # Configurations (e.g. inference timeout) that are applied on your endpoints. # Configurations that are applied to the endpoint for online prediction.
+      "inferenceTimeout": "A String", # Customizable online prediction request timeout.
+    },
+    "createTime": "A String", # Output only. Timestamp when this Endpoint was created.
+    "dedicatedEndpointDns": "A String", # Output only. DNS of the dedicated endpoint. Will only be populated if dedicated_endpoint_enabled is true. Format: `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`.
+    "dedicatedEndpointEnabled": True or False, # If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
+    "deployedModels": [ # Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively.
+      { # A deployment of a Model. Endpoints contain one or more DeployedModels.
+        "automaticResources": { # A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. # A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration.
+          "maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.
+          "minReplicaCount": 42, # Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.
+        },
+        "createTime": "A String", # Output only. Timestamp when the DeployedModel was created.
+        "dedicatedResources": { # A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. # A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration.
+          "autoscalingMetricSpecs": [ # Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`.
+            { # The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count.
+              "metricName": "A String", # Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
+              "target": 42, # The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
+            },
+          ],
+          "machineSpec": { # Specification of a single machine. # Required. Immutable. The specification of a single machine used by the prediction.
+            "acceleratorCount": 42, # The number of accelerators to attach to the machine.
+            "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count.
+            "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required.
+            "reservationAffinity": { # A ReservationAffinity can be used to configure a Vertex AI resource (e.g., a DeployedModel) to draw its Compute Engine resources from a Shared Reservation, or exclusively from on-demand capacity. # Optional. Immutable. Configuration controlling how this resource pool consumes reservation.
+              "key": "A String", # Optional. Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name` as the key and specify the name of your reservation as its value.
+              "reservationAffinityType": "A String", # Required. Specifies the reservation affinity type.
+              "values": [ # Optional. Corresponds to the label values of a reservation resource. This must be the full resource name of the reservation.
+                "A String",
+              ],
+            },
+            "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1").
+          },
+          "maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
+          "minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
+          "spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms).
+        },
+        "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec.
+        "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used.
+        "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option.
+        "enableContainerLogging": True or False, # If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging. Only supported for custom-trained Models and AutoML Tabular Models.
+        "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration.
+          "metadata": { # Metadata describing the Model's input and output for explanation. # Optional. Metadata describing the Model's input and output for explanation.
+            "featureAttributionsSchemaUri": "A String", # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access.
+            "inputs": { # Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance.
+              "a_key": { # Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow.
+                "denseShapeTensorName": "A String", # Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
+                "encodedBaselines": [ # A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor.
+                  "",
+                ],
+                "encodedTensorName": "A String", # Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table.
+                "encoding": "A String", # Defines how the feature is encoded into the input tensor. Defaults to IDENTITY.
+                "featureValueDomain": { # Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. # The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized.
+                  "maxValue": 3.14, # The maximum permissible value for this feature.
+                  "minValue": 3.14, # The minimum permissible value for this feature.
+                  "originalMean": 3.14, # If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization.
+                  "originalStddev": 3.14, # If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization.
+                },
+                "groupName": "A String", # Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name.
+                "indexFeatureMapping": [ # A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
+                  "A String",
+                ],
+                "indicesTensorName": "A String", # Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
+                "inputBaselines": [ # Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature's input in the instance[]. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri.
+                  "",
+                ],
+                "inputTensorName": "A String", # Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow.
+                "modality": "A String", # Modality of the feature. Valid values are: numeric, image. Defaults to numeric.
+                "visualization": { # Visualization configurations for image explanation. # Visualization configurations for image explanation.
+                  "clipPercentLowerbound": 3.14, # Excludes attributions below the specified percentile, from the highlighted areas. Defaults to 62.
+                  "clipPercentUpperbound": 3.14, # Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9.
+                  "colorMap": "A String", # The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue.
+                  "overlayType": "A String", # How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE.
+                  "polarity": "A String", # Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE.
+                  "type": "A String", # Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES.
+                },
+              },
+            },
+            "latentSpaceSource": "A String", # Name of the source to generate embeddings for example based explanations.
+            "outputs": { # Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed.
+              "a_key": { # Metadata of the prediction output to be explained.
+                "displayNameMappingKey": "A String", # Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape of the outputs, so that it can be located by Attribution.output_index for a specific output.
+                "indexDisplayNameMapping": "", # Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It's not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sort the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index.
+                "outputTensorName": "A String", # Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow.
+              },
+            },
+          },
+          "parameters": { # Parameters to configure explaining for Model's predictions. # Required. Parameters that configure explaining of the Model's predictions.
+            "examples": { # Example-based explainability that returns the nearest neighbors from the provided dataset. # Example-based explanations that returns the nearest neighbors from the provided dataset.
+              "exampleGcsSource": { # The Cloud Storage input instances. # The Cloud Storage input instances.
+                "dataFormat": "A String", # The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported.
+                "gcsSource": { # The Google Cloud Storage location for the input content. # The Cloud Storage location for the input instances.
+                  "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+                    "A String",
+                  ],
+                },
+              },
+              "gcsSource": { # The Google Cloud Storage location for the input content. # The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search.
+                "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+                  "A String",
+                ],
+              },
+              "nearestNeighborSearchConfig": "", # The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
+              "neighborCount": 42, # The number of neighbors to return when querying for examples.
+              "presets": { # Preset configuration for example-based explanations # Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality.
+                "modality": "A String", # The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type.
+                "query": "A String", # Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`.
+              },
+            },
+            "integratedGradientsAttribution": { # An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 # An attribution method that computes Aumann-Shapley values taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365
+              "blurBaselineConfig": { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
+                "maxBlurSigma": 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
+              },
+              "smoothGradConfig": { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
+                "featureNoiseSigma": { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
+                  "noiseSigma": [ # Noise sigma per feature. No noise is added to features that are not set.
+                    { # Noise sigma for a single feature.
+                      "name": "A String", # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
+                      "sigma": 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
+                    },
+                  ],
+                },
+                "noiseSigma": 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
+                "noisySampleCount": 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3.
+              },
+              "stepCount": 42, # Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively.
+            },
+            "outputIndices": [ # If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes).
+              "",
+            ],
+            "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for model details: https://arxiv.org/abs/1306.4265.
+              "pathCount": 42, # Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively.
+            },
+            "topK": 42, # If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predicts more than one outputs (e,g, multi-class Models). When set to -1, returns explanations for all outputs.
+            "xraiAttribution": { # An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. # An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead.
+              "blurBaselineConfig": { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
+                "maxBlurSigma": 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
+              },
+              "smoothGradConfig": { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
+                "featureNoiseSigma": { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
+                  "noiseSigma": [ # Noise sigma per feature. No noise is added to features that are not set.
+                    { # Noise sigma for a single feature.
+                      "name": "A String", # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
+                      "sigma": 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
+                    },
+                  ],
+                },
+                "noiseSigma": 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
+                "noisySampleCount": 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3.
+              },
+              "stepCount": 42, # Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively.
+            },
+          },
+        },
+        "id": "A String", # Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are `/[0-9]/`.
+        "model": "A String", # Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` if no version is specified, the default version will be deployed.
+        "modelVersionId": "A String", # Output only. The version ID of the model that is deployed.
+        "privateEndpoints": { # PrivateEndpoints proto is used to provide paths for users to send requests privately. To send request via private service access, use predict_http_uri, explain_http_uri or health_http_uri. To send request via private service connect, use service_attachment. # Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured.
+          "explainHttpUri": "A String", # Output only. Http(s) path to send explain requests.
+          "healthHttpUri": "A String", # Output only. Http(s) path to send health check requests.
+          "predictHttpUri": "A String", # Output only. Http(s) path to send prediction requests.
+          "serviceAttachment": "A String", # Output only. The name of the service attachment resource. Populated if private service connect is enabled.
+        },
+        "serviceAccount": "A String", # The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account.
+        "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`
+        "systemLabels": { # System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only.
+          "a_key": "A String",
+        },
+      },
+    ],
+    "description": "A String", # The description of the Endpoint.
+    "displayName": "A String", # Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+    "enablePrivateServiceConnect": True or False, # Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set.
+    "encryptionSpec": { # Represents a customer-managed encryption key spec that can be applied to a top-level resource. # Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
+      "kmsKeyName": "A String", # Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
+    },
+    "etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
+    "labels": { # The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
+      "a_key": "A String",
+    },
+    "modelDeploymentMonitoringJob": "A String", # Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
+    "name": "A String", # Output only. The resource name of the Endpoint.
+    "network": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+    "predictRequestResponseLoggingConfig": { # Configuration for logging request-response to a BigQuery table. # Configures the request-response logging for online prediction.
+      "bigqueryDestination": { # The BigQuery location for the output content. # BigQuery table for logging. If only given a project, a new dataset will be created with name `logging__` where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores). If no table name is given, a new table will be created with name `request_response_logging`
+        "outputUri": "A String", # Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`.
+      },
+      "enabled": True or False, # If logging is enabled or not.
+      "samplingRate": 3.14, # Percentage of requests to be logged, expressed as a fraction in range(0,1].
+    },
+    "privateServiceConnectConfig": { # Represents configuration for private service connect. # Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive.
+      "enablePrivateServiceConnect": True or False, # Required. If true, expose the IndexEndpoint via private service connect.
+      "enableSecurePrivateServiceConnect": True or False, # Optional. If set to true, enable secure private service connect with IAM authorization. Otherwise, private service connect will be done without authorization. Note latency will be slightly increased if authorization is enabled.
+      "projectAllowlist": [ # A list of Projects from which the forwarding rule will target the service attachment.
+        "A String",
+      ],
+      "serviceAttachment": "A String", # Output only. The name of the generated service attachment resource. This is only populated if the endpoint is deployed with PrivateServiceConnect.
+    },
+    "satisfiesPzi": True or False, # Output only. Reserved for future use.
+    "satisfiesPzs": True or False, # Output only. Reserved for future use.
+    "trafficSplit": { # A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+      "a_key": 42,
+    },
+    "updateTime": "A String", # Output only. Timestamp when this Endpoint was last updated.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
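Taken together, the new method might be exercised from the discovery-based Python client along these lines; this is a sketch, not part of the generated docs. The endpoint name and the "60s" timeout value are placeholders, the Duration string format for inferenceTimeout is assumed, and the polling loop assumes the standard projects.locations.operations.get method.

    # Hypothetical sketch: update an Endpoint's client_connection_config via the
    # new long-running endpoints.update method.
    import time
    from googleapiclient import discovery

    aiplatform = discovery.build("aiplatform", "v1beta1")

    endpoint_name = "projects/my-project/locations/us-central1/endpoints/1234567890"

    body = {
        "endpoint": {
            "name": endpoint_name,
            # Only client_connection_config is currently updatable; other fields are blocked.
            "clientConnectionConfig": {"inferenceTimeout": "60s"},
        }
    }

    operation = (
        aiplatform.projects()
        .locations()
        .endpoints()
        .update(name=endpoint_name, body=body)
        .execute()
    )

    # Poll the returned long-running operation until it reports done.
    while not operation.get("done"):
        time.sleep(5)
        operation = (
            aiplatform.projects()
            .locations()
            .operations()
            .get(name=operation["name"])
            .execute()
        )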
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html b/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html index 188d9cc8f4..f9435927d4 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html @@ -325,7 +325,7 @@

Method Details

"name": "A String", # Required. Extension name shown to the LLM. The name can be up to 128 characters long. }, "name": "A String", # Identifier. The resource name of the Extension. - "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. + "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be [registered with private network access in the provided Service Directory](https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. "serviceDirectory": "A String", # Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. }, "runtimeConfig": { # Runtime configuration to run the extension. # Optional. Runtime config controlling the runtime behavior of this Extension. @@ -485,7 +485,7 @@

Method Details

"name": "A String", # Required. Extension name shown to the LLM. The name can be up to 128 characters long. }, "name": "A String", # Identifier. The resource name of the Extension. - "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. + "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be [registered with private network access in the provided Service Directory](https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. "serviceDirectory": "A String", # Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. }, "runtimeConfig": { # Runtime configuration to run the extension. # Optional. Runtime config controlling the runtime behavior of this Extension. @@ -684,7 +684,7 @@

Method Details

"name": "A String", # Required. Extension name shown to the LLM. The name can be up to 128 characters long. }, "name": "A String", # Identifier. The resource name of the Extension. - "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. + "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be [registered with private network access in the provided Service Directory](https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. "serviceDirectory": "A String", # Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. }, "runtimeConfig": { # Runtime configuration to run the extension. # Optional. Runtime config controlling the runtime behavior of this Extension. @@ -861,7 +861,7 @@

Method Details

"name": "A String", # Required. Extension name shown to the LLM. The name can be up to 128 characters long. }, "name": "A String", # Identifier. The resource name of the Extension. - "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. + "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be [registered with private network access in the provided Service Directory](https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. "serviceDirectory": "A String", # Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. }, "runtimeConfig": { # Runtime configuration to run the extension. # Optional. Runtime config controlling the runtime behavior of this Extension. @@ -1020,7 +1020,7 @@

Method Details

"name": "A String", # Required. Extension name shown to the LLM. The name can be up to 128 characters long. }, "name": "A String", # Identifier. The resource name of the Extension. - "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. + "privateServiceConnectConfig": { # PrivateExtensionConfig configuration for the extension. # Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be [registered with private network access in the provided Service Directory](https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. "serviceDirectory": "A String", # Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. }, "runtimeConfig": { # Runtime configuration to run the extension. # Optional. Runtime config controlling the runtime behavior of this Extension. @@ -1072,6 +1072,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1118,6 +1126,14 @@
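
The codeExecutionResult and executableCode fields added to Part above can be read back from a generateContent response roughly as follows. This is an illustrative sketch: the project and model name are placeholders, and it assumes a code-execution tool is enabled on the request so such parts can appear.

from googleapiclient import discovery

aiplatform = discovery.build("aiplatform", "v1beta1")

model = "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-pro"  # placeholder

body = {
    "contents": [{"role": "user", "parts": [{"text": "Compute the 10th Fibonacci number."}]}],
    # Assumption: a code-execution tool is enabled so the model may return executableCode parts.
    "tools": [{"codeExecution": {}}],
}

response = (
    aiplatform.projects()
    .locations()
    .publishers()
    .models()
    .generateContent(model=model, body=body)
    .execute()
)

# Walk the returned parts; executableCode and codeExecutionResult are the fields added in this patch.
for candidate in response.get("candidates", []):
    for part in candidate.get("content", {}).get("parts", []):
        if "executableCode" in part:
            print("generated code:", part["executableCode"]["code"])
        if "codeExecutionResult" in part:
            print("outcome:", part["codeExecutionResult"]["outcome"], part["codeExecutionResult"].get("output", ""))
        if "text" in part:
            print(part["text"])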

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html index 642ced6f43..58efb0e81c 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html @@ -687,7 +687,7 @@

Method Details

}, } - updateMask: string, Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron` + updateMask: string, Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron` * `optimized_config.automatic_resources` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.html b/docs/dyn/aiplatform_v1beta1.projects.locations.html index 1a2328dffd..dea14445d1 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.html @@ -323,6 +323,14 @@
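
The new `optimized_config.automatic_resources` update-mask path above can be exercised when patching a FeatureView. A rough sketch with placeholder resource names; the replica counts are illustrative values only.

from googleapiclient import discovery

aiplatform = discovery.build("aiplatform", "v1beta1")

feature_view = (
    "projects/my-project/locations/us-central1/"
    "featureOnlineStores/my-store/featureViews/my-view"
)  # placeholder FeatureView resource name

body = {
    "optimizedConfig": {
        # Illustrative values; AutomaticResources bounds the serving replica range.
        "automaticResources": {"minReplicaCount": 1, "maxReplicaCount": 2},
    },
}

operation = (
    aiplatform.projects()
    .locations()
    .featureOnlineStores()
    .featureViews()
    .patch(
        name=feature_view,
        updateMask="optimized_config.automatic_resources",
        body=body,
    )
    .execute()
)
print(operation.get("name"))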

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -387,6 +395,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -447,6 +463,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. Input content to corroborate, only text format is supported for now. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html index bc0de3991b..751e7cf33e 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html @@ -286,7 +286,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -562,7 +562,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -880,7 +880,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -1169,7 +1169,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html index 18342ca5ca..069abdbe4f 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html @@ -226,7 +226,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -320,7 +320,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -502,7 +502,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -596,7 +596,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -820,7 +820,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -914,7 +914,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -1109,7 +1109,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -1203,7 +1203,7 @@

Method Details

"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for CustomJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimes.html b/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimes.html index b783ee4375..b1953a576b 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimes.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimes.html @@ -106,6 +106,9 @@

Instance Methods

start(name, body=None, x__xgafv=None)

Starts a NotebookRuntime.

+

+ stop(name, body=None, x__xgafv=None)

+ Stops a NotebookRuntime.

upgrade(name, body=None, x__xgafv=None)

Upgrades a NotebookRuntime.

@@ -463,6 +466,47 @@

Method Details

}
+
+ stop(name, body=None, x__xgafv=None)
+
+ Stops a NotebookRuntime.
+
+Args:
+  name: string, Required. The name of the NotebookRuntime resource to be stopped. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for NotebookService.StopNotebookRuntime.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
upgrade(name, body=None, x__xgafv=None)
Upgrades a NotebookRuntime.
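
For reference, the new stop method documented above can be called with the discovery-based Python client roughly as follows. This is a minimal sketch: it assumes default application credentials, placeholder project, location, and runtime IDs, and the standard projects.locations.operations resource for polling.

import time

from googleapiclient import discovery

aiplatform = discovery.build("aiplatform", "v1beta1")

# Placeholder resource name; substitute a real NotebookRuntime.
runtime = "projects/my-project/locations/us-central1/notebookRuntimes/1234567890"

# The request body is an empty StopNotebookRuntimeRequest, per the method details above.
operation = (
    aiplatform.projects()
    .locations()
    .notebookRuntimes()
    .stop(name=runtime, body={})
    .execute()
)

# stop() returns a long-running operation; poll it until done.
while not operation.get("done"):
    time.sleep(10)
    operation = (
        aiplatform.projects()
        .locations()
        .operations()
        .get(name=operation["name"])
        .execute()
    )

if "error" in operation:
    raise RuntimeError(operation["error"].get("message"))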
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.persistentResources.html b/docs/dyn/aiplatform_v1beta1.projects.locations.persistentResources.html
index c24fcb192c..4a445cb323 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.persistentResources.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.persistentResources.html
@@ -139,7 +139,7 @@ 

Method Details

"name": "A String", # Immutable. Resource name of a PersistentResource. "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to peered with Vertex AI to host the persistent resources. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the resources aren't peered with any network. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PersistentResource. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved IP ranges under the VPC network that can be used for this persistent resource. If set, we will deploy the persistent resource within the provided IP ranges. Otherwise, the persistent resource is deployed to any IP ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -182,6 +182,13 @@

Method Details

"raySpec": { # Configuration information for the Ray cluster. For experimental launch, Ray cluster creation and Persistent cluster creation are 1:1 mapping: We will provision all the nodes within the Persistent cluster as Ray nodes. # Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. "headNodeResourcePoolId": "A String", # Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). Will use the machine from the first workerpool as the head node by default if this field isn't set. "imageUri": "A String", # Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images} field. + "nfsMounts": [ # Optional. Use if you want to mount to any NFS storages. + { # Represents a mount configuration for Network File System (NFS) to mount. + "mountPoint": "A String", # Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + "path": "A String", # Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + "server": "A String", # Required. IP address of the NFS server. + }, + ], "rayLogsSpec": { # Configuration for the Ray OSS Logs. # Optional. OSS Ray logging configurations. "disabled": True or False, # Optional. Flag to disable the export of Ray OSS logs to Cloud Logging. }, @@ -304,7 +311,7 @@

Method Details

"name": "A String", # Immutable. Resource name of a PersistentResource. "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to peered with Vertex AI to host the persistent resources. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the resources aren't peered with any network. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PersistentResource. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved IP ranges under the VPC network that can be used for this persistent resource. If set, we will deploy the persistent resource within the provided IP ranges. Otherwise, the persistent resource is deployed to any IP ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -347,6 +354,13 @@

Method Details

"raySpec": { # Configuration information for the Ray cluster. For experimental launch, Ray cluster creation and Persistent cluster creation are 1:1 mapping: We will provision all the nodes within the Persistent cluster as Ray nodes. # Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. "headNodeResourcePoolId": "A String", # Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). Will use the machine from the first workerpool as the head node by default if this field isn't set. "imageUri": "A String", # Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images} field. + "nfsMounts": [ # Optional. Use if you want to mount to any NFS storages. + { # Represents a mount configuration for Network File System (NFS) to mount. + "mountPoint": "A String", # Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + "path": "A String", # Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + "server": "A String", # Required. IP address of the NFS server. + }, + ], "rayLogsSpec": { # Configuration for the Ray OSS Logs. # Optional. OSS Ray logging configurations. "disabled": True or False, # Optional. Flag to disable the export of Ray OSS logs to Cloud Logging. }, @@ -410,7 +424,7 @@

Method Details

"name": "A String", # Immutable. Resource name of a PersistentResource. "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to peered with Vertex AI to host the persistent resources. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the resources aren't peered with any network. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PersistentResource. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved IP ranges under the VPC network that can be used for this persistent resource. If set, we will deploy the persistent resource within the provided IP ranges. Otherwise, the persistent resource is deployed to any IP ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -453,6 +467,13 @@

Method Details

"raySpec": { # Configuration information for the Ray cluster. For experimental launch, Ray cluster creation and Persistent cluster creation are 1:1 mapping: We will provision all the nodes within the Persistent cluster as Ray nodes. # Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. "headNodeResourcePoolId": "A String", # Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). Will use the machine from the first workerpool as the head node by default if this field isn't set. "imageUri": "A String", # Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images} field. + "nfsMounts": [ # Optional. Use if you want to mount to any NFS storages. + { # Represents a mount configuration for Network File System (NFS) to mount. + "mountPoint": "A String", # Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + "path": "A String", # Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + "server": "A String", # Required. IP address of the NFS server. + }, + ], "rayLogsSpec": { # Configuration for the Ray OSS Logs. # Optional. OSS Ray logging configurations. "disabled": True or False, # Optional. Flag to disable the export of Ray OSS logs to Cloud Logging. }, @@ -522,7 +543,7 @@

"name": "A String", # Immutable. Resource name of a PersistentResource. "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to peered with Vertex AI to host the persistent resources. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the resources aren't peered with any network. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PersistentResource. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # Optional. A list of names for the reserved IP ranges under the VPC network that can be used for this persistent resource. If set, we will deploy the persistent resource within the provided IP ranges. Otherwise, the persistent resource is deployed to any IP ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", @@ -565,6 +586,13 @@

"raySpec": { # Configuration information for the Ray cluster. For experimental launch, Ray cluster creation and Persistent cluster creation are 1:1 mapping: We will provision all the nodes within the Persistent cluster as Ray nodes. # Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. "headNodeResourcePoolId": "A String", # Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). Will use the machine from the first workerpool as the head node by default if this field isn't set. "imageUri": "A String", # Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images} field. + "nfsMounts": [ # Optional. Use if you want to mount to any NFS storages. + { # Represents a mount configuration for Network File System (NFS) to mount. + "mountPoint": "A String", # Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + "path": "A String", # Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + "server": "A String", # Required. IP address of the NFS server. + }, + ], "rayLogsSpec": { # Configuration for the Ray OSS Logs. # Optional. OSS Ray logging configurations. "disabled": True or False, # Optional. Flag to disable the export of Ray OSS logs to Cloud Logging. }, diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html index 1112ba8301..ab8ca02b1a 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html @@ -464,12 +464,19 @@

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -741,12 +748,19 @@
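For the new `runtimeConfig.defaultRuntime` block, here is a hedged sketch of a pipeline-job create call that targets an existing persistent resource; the template URI, wait time and timeout-behavior value are illustrative guesses, not values taken from this changeset.

    from googleapiclient import discovery

    client = discovery.build(
        "aiplatform", "v1beta1",
        client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
    )

    pipeline_job = {
        "displayName": "train-on-persistent-resource",
        "templateUri": "https://us-central1-kfp.pkg.dev/my-project/my-repo/my-pipeline/latest",  # placeholder
        "runtimeConfig": {
            "gcsOutputDirectory": "gs://my-bucket/pipeline-root",
            "defaultRuntime": {
                "persistentResourceRuntimeDetail": {
                    "persistentResourceName": (
                        "projects/my-project/locations/us-central1/"
                        "persistentResources/ray-cluster-with-nfs"
                    ),
                    # Wait up to 5 minutes for capacity; the enum value below is a
                    # guess at the failure behavior and may differ in the real API.
                    "taskResourceUnavailableWaitTimeMs": "300000",
                    "taskResourceUnavailableTimeoutBehavior": "FAIL",
                },
            },
        },
    }

    operation = client.projects().locations().pipelineJobs().create(
        parent="projects/my-project/locations/us-central1",
        body=pipeline_job,
    ).execute()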

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -1059,12 +1073,19 @@

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -1350,12 +1371,19 @@

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html index d090ad4485..e5c1b1a7f1 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html @@ -124,6 +124,14 @@

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -197,6 +205,14 @@

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -293,6 +309,14 @@

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -324,6 +348,8 @@

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -458,6 +484,14 @@

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -560,6 +594,14 @@

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -599,6 +641,8 @@

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -735,6 +779,14 @@

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -770,6 +822,7 @@

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -1180,6 +1233,14 @@

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1282,6 +1343,14 @@

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1321,6 +1390,8 @@

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -1457,6 +1528,14 @@

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1492,6 +1571,7 @@

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html index cc9a756891..d8e620fab3 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html @@ -178,7 +178,7 @@

"jiraSource": { # The Jira source for the ImportRagFilesRequest. # The RagFile is imported from a Jira query. "jiraQueries": [ # Required. The Jira queries. { # JiraQueries contains the Jira queries and corresponding authentication. - "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). + "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ @@ -259,7 +259,7 @@

"jiraSource": { # The Jira source for the ImportRagFilesRequest. # Jira queries with their corresponding authentication. "jiraQueries": [ # Required. The Jira queries. { # JiraQueries contains the Jira queries and corresponding authentication. - "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). + "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ @@ -274,10 +274,10 @@

], }, "maxEmbeddingRequestsPerMin": 42, # Optional. The max number of queries per minute that this job is allowed to make to the embedding model specified on the corpus. This value is specific to this job and not shared across other import jobs. Consult the Quotas page on the project to set an appropriate value here. If unspecified, a default value of 1,000 QPM would be used. - "partialFailureBigquerySink": { # The BigQuery location for the output content. # The BigQuery destination to write partial failures to. It should be a bigquery table resource name (e.g. "bq://projectId.bqDatasetId.bqTableId"). If the dataset id does not exist, it will be created. If the table does not exist, it will be created with the expected schema. If the table exists, the schema will be validated and data will be added to this existing table. + "partialFailureBigquerySink": { # The BigQuery location for the output content. # The BigQuery destination to write partial failures to. It should be a bigquery table resource name (e.g. "bq://projectId.bqDatasetId.bqTableId"). The dataset must exist. If the table does not exist, it will be created with the expected schema. If the table exists, the schema will be validated and data will be added to this existing table. Deprecated. Prefer to use `import_result_bq_sink`. "outputUri": "A String", # Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. }, - "partialFailureGcsSink": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage path to write partial failures to. + "partialFailureGcsSink": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage path to write partial failures to. Deprecated. Prefer to use `import_result_gcs_sink`. "outputUriPrefix": "A String", # Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. }, "ragFileChunkingConfig": { # Specifies the size and overlap of chunks for RagFiles. # Specifies the size and overlap of chunks after importing RagFiles. @@ -397,7 +397,7 @@

"jiraSource": { # The Jira source for the ImportRagFilesRequest. # The RagFile is imported from a Jira query. "jiraQueries": [ # Required. The Jira queries. { # JiraQueries contains the Jira queries and corresponding authentication. - "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). + "apiKeyConfig": { # The API secret. # Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). "apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} }, "customQueries": [ # A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html b/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html index 841bb3cb57..c4ffbfe5ea 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html @@ -125,7 +125,7 @@

"etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "name": "A String", # Identifier. The resource name of the ReasoningEngine. "spec": { # ReasoningEngine configurations # Required. Configurations of the ReasoningEngine - "classMethods": [ # Optional. Declarations for object class methods. + "classMethods": [ # Optional. Declarations for object class methods in OpenAPI specification format. { "a_key": "", # Properties of the object. }, @@ -225,7 +225,7 @@

"etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "name": "A String", # Identifier. The resource name of the ReasoningEngine. "spec": { # ReasoningEngine configurations # Required. Configurations of the ReasoningEngine - "classMethods": [ # Optional. Declarations for object class methods. + "classMethods": [ # Optional. Declarations for object class methods in OpenAPI specification format. { "a_key": "", # Properties of the object. }, @@ -268,7 +268,7 @@

"etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "name": "A String", # Identifier. The resource name of the ReasoningEngine. "spec": { # ReasoningEngine configurations # Required. Configurations of the ReasoningEngine - "classMethods": [ # Optional. Declarations for object class methods. + "classMethods": [ # Optional. Declarations for object class methods in OpenAPI specification format. { "a_key": "", # Properties of the object. }, @@ -316,7 +316,7 @@

"etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "name": "A String", # Identifier. The resource name of the ReasoningEngine. "spec": { # ReasoningEngine configurations # Required. Configurations of the ReasoningEngine - "classMethods": [ # Optional. Declarations for object class methods. + "classMethods": [ # Optional. Declarations for object class methods in OpenAPI specification format. { "a_key": "", # Properties of the object. }, diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html b/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html index bc5efed458..f13e625064 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html @@ -716,12 +716,19 @@

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -1377,12 +1384,19 @@
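The hunks above add a `defaultRuntime` block under `runtimeConfig`, letting a PipelineJob run its tasks on an existing persistent resource instead of the default on-demand Vertex Custom Job, and they also change `pscInterfaceConfig.networkAttachment` to take the attachment name within the region and user project. A minimal sketch of how these new fields might be populated with the v1beta1 discovery client follows; the project, bucket, persistent resource, and network attachment names are placeholders, and the regional endpoint and credentials handling are assumptions, not a definitive recipe.

```python
from googleapiclient import discovery

# Assumes Application Default Credentials; Vertex AI methods usually need a regional endpoint.
aiplatform = discovery.build(
    "aiplatform",
    "v1beta1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

pipeline_job = {
    "displayName": "example-pipeline-job",  # placeholder
    "runtimeConfig": {
        "gcsOutputDirectory": "gs://example-bucket/pipeline-output",  # placeholder bucket
        # New in this revision: route pipeline tasks to an existing persistent resource
        # instead of the default on-demand Vertex Custom Job runtime.
        "defaultRuntime": {
            "persistentResourceRuntimeDetail": {
                "persistentResourceName": (
                    "projects/example-project/locations/us-central1/"
                    "persistentResources/example-persistent-resource"  # placeholder
                ),
                # taskResourceUnavailableWaitTimeMs and taskResourceUnavailableTimeoutBehavior
                # can also be set here; their allowed values are defined by the schema above.
            },
        },
    },
    # Per the updated field description, this is the attachment name within the
    # region and user project, not the full resource name.
    "pscInterfaceConfig": {"networkAttachment": "example-network-attachment"},  # placeholder
}

request = aiplatform.projects().locations().pipelineJobs().create(
    parent="projects/example-project/locations/us-central1",
    body=pipeline_job,
)
response = request.execute()
print(response.get("name"))
```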

Method Details

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -2080,12 +2094,19 @@

Method Details

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -2755,12 +2776,19 @@

Method Details

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -3434,12 +3462,19 @@

Method Details

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. @@ -4096,12 +4131,19 @@

Method Details

], "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. "pscInterfaceConfig": { # Configuration for PSC-I. # Optional. Configuration for PSC-I for PipelineJob. - "networkAttachment": "A String", # Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. + "networkAttachment": "A String", # Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I. }, "reservedIpRanges": [ # A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], "runtimeConfig": { # The runtime config of a PipelineJob. # Runtime config of the pipeline. + "defaultRuntime": { # The default runtime for the PipelineJob. # Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview. + "persistentResourceRuntimeDetail": { # Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview # Persistent resource based runtime detail. + "persistentResourceName": "A String", # Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` + "taskResourceUnavailableTimeoutBehavior": "A String", # Specifies the behavior to take if the timeout is reached. + "taskResourceUnavailableWaitTimeMs": "A String", # The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0. + }, + }, "failurePolicy": "A String", # Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. "gcsOutputDirectory": "A String", # Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. 
The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. "inputArtifacts": { # The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html index f253d4e98f..ac215f13aa 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html @@ -209,6 +209,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -303,6 +311,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -467,6 +483,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -561,6 +585,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -732,6 +764,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -826,6 +866,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1003,6 +1051,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1097,6 +1153,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1289,6 +1353,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1383,6 +1455,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. diff --git a/docs/dyn/aiplatform_v1beta1.publishers.models.html b/docs/dyn/aiplatform_v1beta1.publishers.models.html index 6d3d458666..5fbb7e9882 100644 --- a/docs/dyn/aiplatform_v1beta1.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.publishers.models.html @@ -118,6 +118,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -191,6 +199,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -287,6 +303,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -318,6 +342,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -452,6 +478,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -554,6 +588,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -593,6 +635,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -729,6 +773,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -764,6 +816,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, @@ -998,7 +1051,7 @@

Method Details

"spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). }, "deployMetadata": { # Metadata information about the deployment for managing deployment config. # Optional. Metadata information about this deployment config. - "labels": { # Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc. + "labels": { # Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc. "a_key": "A String", }, "sampleRequest": "A String", # Optional. Sample request for deployed endpoint. @@ -1097,7 +1150,7 @@

Method Details

"spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). }, "deployMetadata": { # Metadata information about the deployment for managing deployment config. # Optional. Metadata information about this deployment config. - "labels": { # Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc. + "labels": { # Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc. "a_key": "A String", }, "sampleRequest": "A String", # Optional. Sample request for deployed endpoint. @@ -1405,7 +1458,7 @@

Method Details

"spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). }, "deployMetadata": { # Metadata information about the deployment for managing deployment config. # Optional. Metadata information about this deployment config. - "labels": { # Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc. + "labels": { # Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc. "a_key": "A String", }, "sampleRequest": "A String", # Optional. Sample request for deployed endpoint. @@ -1504,7 +1557,7 @@

Method Details

"spot": True or False, # Optional. If true, schedule the deployment workload on [spot VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). }, "deployMetadata": { # Metadata information about the deployment for managing deployment config. # Optional. Metadata information about this deployment config. - "labels": { # Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc. + "labels": { # Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc. "a_key": "A String", }, "sampleRequest": "A String", # Optional. Sample request for deployed endpoint. @@ -1700,6 +1753,14 @@

Method Details

{ # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1802,6 +1863,14 @@

Method Details

"systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -1841,6 +1910,8 @@

Method Details

}, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. + }, "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided. { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. @@ -1977,6 +2048,14 @@

Method Details

"content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "codeExecutionResult": { # Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. # Optional. Result of executing the [ExecutableCode]. + "outcome": "A String", # Required. Outcome of the code execution. + "output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise. + }, + "executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. # Optional. Code generated by the model that is meant to be executed. + "code": "A String", # Required. The code to be executed. + "language": "A String", # Required. Programming language of the `code`. + }, "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. @@ -2012,6 +2091,7 @@

Method Details

"groundingChunks": [ # List of supporting references retrieved from specified grounding source. { # Grounding chunk. "retrievedContext": { # Chunk from context retrieved by the retrieval tools. # Grounding chunk from context retrieved by the retrieval tools. + "text": "A String", # Text of the attribution. "title": "A String", # Title of the attribution. "uri": "A String", # URI reference of the attribution. }, diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index 7d54370fa4..39927321f3 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -5401,7 +5401,7 @@ "type": "string" }, "updateMask": { -"description": "Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron`", +"description": "Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron` * `optimized_config.automatic_resources`", "format": "google-fieldmask", "location": "query", "type": "string" @@ -12796,6 +12796,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"stop": { +"description": "Stops a NotebookRuntime.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/notebookRuntimes/{notebookRuntimesId}:stop", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.notebookRuntimes.stop", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the NotebookRuntime resource to be stopped. 
Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/notebookRuntimes/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:stop", +"request": { +"$ref": "GoogleCloudAiplatformV1StopNotebookRuntimeRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "upgrade": { "description": "Upgrades a NotebookRuntime.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/notebookRuntimes/{notebookRuntimesId}:upgrade", @@ -17874,7 +17902,7 @@ } } }, -"revision": "20241007", +"revision": "20241025", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -19655,6 +19683,18 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1ClientConnectionConfig": { +"description": "Configurations (e.g. inference timeout) that are applied on your endpoints.", +"id": "GoogleCloudAiplatformV1ClientConnectionConfig", +"properties": { +"inferenceTimeout": { +"description": "Customizable online prediction request timeout.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1CoherenceInput": { "description": "Input for coherence metric.", "id": "GoogleCloudAiplatformV1CoherenceInput", @@ -20547,10 +20587,6 @@ "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations", "type": "string" }, -"pscInterfaceConfig": { -"$ref": "GoogleCloudAiplatformV1PscInterfaceConfig", -"description": "Optional. Configuration for PSC-I for CustomJob." -}, "reservedIpRanges": { "description": "Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].", "items": { @@ -21352,6 +21388,13 @@ "sharedResources": { "description": "The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`", "type": "string" +}, +"systemLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only.", +"type": "object" } }, "type": "object" @@ -21562,6 +21605,10 @@ "description": "Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations.", "id": "GoogleCloudAiplatformV1Endpoint", "properties": { +"clientConnectionConfig": { +"$ref": "GoogleCloudAiplatformV1ClientConnectionConfig", +"description": "Configurations that are applied to the endpoint for online prediction." +}, "createTime": { "description": "Output only. Timestamp when this Endpoint was created.", "format": "google-datetime", @@ -23669,6 +23716,10 @@ "description": "Identifier. Name of the FeatureView. 
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}`", "type": "string" }, +"optimizedConfig": { +"$ref": "GoogleCloudAiplatformV1FeatureViewOptimizedConfig", +"description": "Optional. Configuration for FeatureView created under Optimized FeatureOnlineStore." +}, "satisfiesPzi": { "description": "Output only. Reserved for future use.", "readOnly": true, @@ -23848,6 +23899,17 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1FeatureViewOptimizedConfig": { +"description": "Configuration for FeatureViews created in Optimized FeatureOnlineStore.", +"id": "GoogleCloudAiplatformV1FeatureViewOptimizedConfig", +"properties": { +"automaticResources": { +"$ref": "GoogleCloudAiplatformV1AutomaticResources", +"description": "Optional. A description of resources that the FeatureView uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2. If max_replica_count is not set, the default value is 6. The max allowed replica count is 1000." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1FeatureViewSync": { "description": "FeatureViewSync is a representation of sync operation which copies data from data source to Feature View in Online Store.", "id": "GoogleCloudAiplatformV1FeatureViewSync", @@ -25049,6 +25111,10 @@ "description": "Chunk from context retrieved by the retrieval tools.", "id": "GoogleCloudAiplatformV1GroundingChunkRetrievedContext", "properties": { +"text": { +"description": "Text of the attribution.", +"type": "string" +}, "title": { "description": "Title of the attribution.", "type": "string" @@ -31006,12 +31072,6 @@ false }, "type": "object" }, -"GoogleCloudAiplatformV1PscInterfaceConfig": { -"description": "Configuration for PSC-I.", -"id": "GoogleCloudAiplatformV1PscInterfaceConfig", -"properties": {}, -"type": "object" -}, "GoogleCloudAiplatformV1PublisherModel": { "description": "A Model Garden Publisher Model.", "id": "GoogleCloudAiplatformV1PublisherModel", @@ -31223,7 +31283,7 @@ false "additionalProperties": { "type": "string" }, -"description": "Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc.", +"description": "Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc.", "type": "object" }, "sampleRequest": { @@ -35102,6 +35162,14 @@ false }, "type": "array" }, +"infillPrefix": { +"description": "Preamble: For infill prompt, the prefix before expected model response.", +"type": "string" +}, +"infillSuffix": { +"description": "Preamble: For infill prompt, the suffix after expected model response.", +"type": "string" +}, "inputPrefixes": { "description": "Preamble: The input prefixes before each example input.", "items": { @@ -35116,6 +35184,13 @@ false }, "type": "array" }, +"predictionInputs": { +"description": "Preamble: The input test data for prediction. Each PartList in this field represents one text-only input set for a single model request.", +"items": { +"$ref": "GoogleCloudAiplatformV1SchemaPromptSpecPartList" +}, +"type": "array" +}, "promptMessage": { "$ref": "GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage", "description": "The prompt message." 
@@ -37467,6 +37542,12 @@ false "properties": {}, "type": "object" }, +"GoogleCloudAiplatformV1StopNotebookRuntimeRequest": { +"description": "Request message for NotebookService.StopNotebookRuntime.", +"id": "GoogleCloudAiplatformV1StopNotebookRuntimeRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1StopTrialRequest": { "description": "Request message for VizierService.StopTrial.", "id": "GoogleCloudAiplatformV1StopTrialRequest", @@ -40349,6 +40430,7 @@ false "type": "array" }, "similarityTopK": { +"deprecated": true, "description": "Optional. Number of top k results to return from the selected corpora.", "format": "int32", "type": "integer" diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index 0519e6dee8..159548b4d5 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -4990,6 +4990,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"update": { +"description": "Updates an Endpoint with a long running operation.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/endpoints/{endpointsId}:update", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.endpoints.update", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Output only. The resource name of the Endpoint.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/endpoints/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}:update", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1UpdateEndpointLongRunningRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } }, "resources": { @@ -7100,7 +7128,7 @@ "type": "string" }, "updateMask": { -"description": "Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron`", +"description": "Field mask is used to specify the fields to be overwritten in the FeatureView resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then only the non-empty fields present in the request will be overwritten. Set the update_mask to `*` to override all fields. 
Updatable fields: * `labels` * `service_agent_type` * `big_query_source` * `big_query_source.uri` * `big_query_source.entity_id_columns` * `feature_registry_source` * `feature_registry_source.feature_groups` * `sync_config` * `sync_config.cron` * `optimized_config.automatic_resources`", "format": "google-fieldmask", "location": "query", "type": "string" @@ -15140,6 +15168,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"stop": { +"description": "Stops a NotebookRuntime.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/notebookRuntimes/{notebookRuntimesId}:stop", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.notebookRuntimes.stop", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the NotebookRuntime resource to be stopped. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/notebookRuntimes/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}:stop", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1StopNotebookRuntimeRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "upgrade": { "description": "Upgrades a NotebookRuntime.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/notebookRuntimes/{notebookRuntimesId}:upgrade", @@ -21230,7 +21286,7 @@ } } }, -"revision": "20241007", +"revision": "20241025", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -23440,6 +23496,45 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ClientConnectionConfig": { +"description": "Configurations (e.g. inference timeout) that are applied on your endpoints.", +"id": "GoogleCloudAiplatformV1beta1ClientConnectionConfig", +"properties": { +"inferenceTimeout": { +"description": "Customizable online prediction request timeout.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1CodeExecutionResult": { +"description": "Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode].", +"id": "GoogleCloudAiplatformV1beta1CodeExecutionResult", +"properties": { +"outcome": { +"description": "Required. Outcome of the code execution.", +"enum": [ +"OUTCOME_UNSPECIFIED", +"OUTCOME_OK", +"OUTCOME_FAILED", +"OUTCOME_DEADLINE_EXCEEDED" +], +"enumDescriptions": [ +"Unspecified status. This value should not be used.", +"Code execution completed successfully.", +"Code execution finished but with a failure. `stderr` should contain the reason.", +"Code execution ran for too long, and was cancelled. There may or may not be a partial output present." +], +"type": "string" +}, +"output": { +"description": "Optional. Contains stdout when code execution is successful, stderr or other description otherwise.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1CoherenceInput": { "description": "Input for coherence metric.", "id": "GoogleCloudAiplatformV1beta1CoherenceInput", @@ -25418,6 +25513,13 @@ "sharedResources": { "description": "The resource name of the shared DeploymentResourcePool to deploy on. 
Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`", "type": "string" +}, +"systemLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only.", +"type": "object" } }, "type": "object" @@ -25718,6 +25820,10 @@ "description": "Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations.", "id": "GoogleCloudAiplatformV1beta1Endpoint", "properties": { +"clientConnectionConfig": { +"$ref": "GoogleCloudAiplatformV1beta1ClientConnectionConfig", +"description": "Configurations that are applied to the endpoint for online prediction." +}, "createTime": { "description": "Output only. Timestamp when this Endpoint was created.", "format": "google-datetime", @@ -26466,6 +26572,29 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ExecutableCode": { +"description": "Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE].", +"id": "GoogleCloudAiplatformV1beta1ExecutableCode", +"properties": { +"code": { +"description": "Required. The code to be executed.", +"type": "string" +}, +"language": { +"description": "Required. Programming language of the `code`.", +"enum": [ +"LANGUAGE_UNSPECIFIED", +"PYTHON" +], +"enumDescriptions": [ +"Unspecified language. This value should not be used.", +"Python >= 3.10, with numpy and simpy available." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1ExecuteExtensionRequest": { "description": "Request message for ExtensionExecutionService.ExecuteExtension.", "id": "GoogleCloudAiplatformV1beta1ExecuteExtensionRequest", @@ -27337,7 +27466,7 @@ }, "privateServiceConnectConfig": { "$ref": "GoogleCloudAiplatformV1beta1ExtensionPrivateServiceConnectConfig", -"description": "Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution." +"description": "Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be [registered with private network access in the provided Service Directory](https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution." }, "runtimeConfig": { "$ref": "GoogleCloudAiplatformV1beta1RuntimeConfig", @@ -29679,6 +29808,10 @@ "description": "Chunk from context retrieved by the retrieval tools.", "id": "GoogleCloudAiplatformV1beta1GroundingChunkRetrievedContext", "properties": { +"text": { +"description": "Text of the attribution.", +"type": "string" +}, "title": { "description": "Title of the attribution.", "type": "string" @@ -30162,14 +30295,17 @@ }, "partialFailureBigquerySink": { "$ref": "GoogleCloudAiplatformV1beta1BigQueryDestination", -"description": "The BigQuery destination to write partial failures to. 
It should be a bigquery table resource name (e.g. \"bq://projectId.bqDatasetId.bqTableId\"). If the dataset id does not exist, it will be created. If the table does not exist, it will be created with the expected schema. If the table exists, the schema will be validated and data will be added to this existing table." +"deprecated": true, +"description": "The BigQuery destination to write partial failures to. It should be a bigquery table resource name (e.g. \"bq://projectId.bqDatasetId.bqTableId\"). The dataset must exist. If the table does not exist, it will be created with the expected schema. If the table exists, the schema will be validated and data will be added to this existing table. Deprecated. Prefer to use `import_result_bq_sink`." }, "partialFailureGcsSink": { "$ref": "GoogleCloudAiplatformV1beta1GcsDestination", -"description": "The Cloud Storage path to write partial failures to." +"deprecated": true, +"description": "The Cloud Storage path to write partial failures to. Deprecated. Prefer to use `import_result_gcs_sink`." }, "ragFileChunkingConfig": { "$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"deprecated": true, "description": "Specifies the size and overlap of chunks after importing RagFiles." }, "ragFileParsingConfig": { @@ -30738,7 +30874,7 @@ "properties": { "apiKeyConfig": { "$ref": "GoogleCloudAiplatformV1beta1ApiAuthApiKeyConfig", -"description": "Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/)." +"description": "Required. The SecretManager secret version resource name (e.g. projects/{project}/secrets/{secret}/versions/{version}) storing the Jira API key. See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/)." }, "customQueries": { "description": "A list of custom Jira queries to import. For information about JQL (Jira Query Language), see https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/", @@ -35779,6 +35915,14 @@ false "description": "A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.", "id": "GoogleCloudAiplatformV1beta1Part", "properties": { +"codeExecutionResult": { +"$ref": "GoogleCloudAiplatformV1beta1CodeExecutionResult", +"description": "Optional. Result of executing the [ExecutableCode]." +}, +"executableCode": { +"$ref": "GoogleCloudAiplatformV1beta1ExecutableCode", +"description": "Optional. Code generated by the model that is meant to be executed." +}, "fileData": { "$ref": "GoogleCloudAiplatformV1beta1FileData", "description": "Optional. URI based data." @@ -36157,6 +36301,10 @@ false "description": "The runtime config of a PipelineJob.", "id": "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfig", "properties": { +"defaultRuntime": { +"$ref": "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigDefaultRuntime", +"description": "Optional. The default runtime for the PipelineJob. If not provided, Vertex Custom Job(on demand) is used as the runtime. 
For Vertex Custom Job, please refer to https://cloud.google.com/vertex-ai/docs/training/overview." +}, "failurePolicy": { "description": "Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion.", "enum": [ @@ -36200,6 +36348,17 @@ false }, "type": "object" }, +"GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigDefaultRuntime": { +"description": "The default runtime for the PipelineJob.", +"id": "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigDefaultRuntime", +"properties": { +"persistentResourceRuntimeDetail": { +"$ref": "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigPersistentResourceRuntimeDetail", +"description": "Persistent resource based runtime detail." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigInputArtifact": { "description": "The type of an input artifact.", "id": "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigInputArtifact", @@ -36211,6 +36370,36 @@ false }, "type": "object" }, +"GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigPersistentResourceRuntimeDetail": { +"description": "Persistent resource based runtime detail. For more information, refer to https://cloud.google.com/vertex-ai/docs/training/persistent-resource-overview", +"id": "GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigPersistentResourceRuntimeDetail", +"properties": { +"persistentResourceName": { +"description": "Persistent resource name. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}`", +"type": "string" +}, +"taskResourceUnavailableTimeoutBehavior": { +"description": "Specifies the behavior to take if the timeout is reached.", +"enum": [ +"TASK_RESOURCE_UNAVAILABLE_TIMEOUT_BEHAVIOR_UNSPECIFIED", +"FAIL", +"FALL_BACK_TO_ON_DEMAND" +], +"enumDescriptions": [ +"Unspecified. Behavior is same as `FAIL`.", +"Fail the task if the timeout is reached.", +"Fall back to on-demand execution if the timeout is reached." +], +"type": "string" +}, +"taskResourceUnavailableWaitTimeMs": { +"description": "The max time a pipeline task waits for the required CPU, memory, or accelerator resource to become available from the specified persistent resource. Default wait time is 0.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1PipelineTaskDetail": { "description": "The runtime detail of a task execution.", "id": "GoogleCloudAiplatformV1beta1PipelineTaskDetail", @@ -36865,7 +37054,7 @@ false "id": "GoogleCloudAiplatformV1beta1PscInterfaceConfig", "properties": { "networkAttachment": { -"description": "Optional. The full name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource. For example, `projects/12345/regions/us-central1/networkAttachments/myNA`. is of the form `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. Where {project} is a project number, as in `12345`, and {networkAttachment} is a network attachment name. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). 
This field is only used for resources using PSC-I.", +"description": "Optional. The name of the Compute Engine [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to attach to the resource within the region and user project. To specify this field, you must have already [created a network attachment] (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). This field is only used for resources using PSC-I.", "type": "string" } }, @@ -37086,7 +37275,7 @@ false "additionalProperties": { "type": "string" }, -"description": "Optional. Labels for the deployment. For managing deployment config like verifying, source of deployment config, etc.", +"description": "Optional. Labels for the deployment config. For managing deployment config like verifying, source of deployment config, etc.", "type": "object" }, "sampleRequest": { @@ -37901,10 +38090,12 @@ false }, "ragEmbeddingModelConfig": { "$ref": "GoogleCloudAiplatformV1beta1RagEmbeddingModelConfig", +"deprecated": true, "description": "Optional. Immutable. The embedding model config of the RagCorpus." }, "ragVectorDbConfig": { "$ref": "GoogleCloudAiplatformV1beta1RagVectorDbConfig", +"deprecated": true, "description": "Optional. Immutable. The Vector DB config of the RagCorpus." }, "updateTime": { @@ -38089,11 +38280,13 @@ false "id": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", "properties": { "chunkOverlap": { +"deprecated": true, "description": "The overlap between chunks.", "format": "int32", "type": "integer" }, "chunkSize": { +"deprecated": true, "description": "The size of the chunks.", "format": "int32", "type": "integer" @@ -38106,6 +38299,7 @@ false "id": "GoogleCloudAiplatformV1beta1RagFileParsingConfig", "properties": { "useAdvancedPdfParsing": { +"deprecated": true, "description": "Whether to use advanced PDF parsing.", "type": "boolean" } @@ -38118,9 +38312,11 @@ false "properties": { "ranking": { "$ref": "GoogleCloudAiplatformV1beta1RagQueryRanking", +"deprecated": true, "description": "Optional. Configurations for hybrid search results ranking." }, "similarityTopK": { +"deprecated": true, "description": "Optional. The number of contexts to retrieve.", "format": "int32", "type": "integer" @@ -38278,6 +38474,13 @@ false "description": "Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images} field.", "type": "string" }, +"nfsMounts": { +"description": "Optional. Use if you want to mount to any NFS storages.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1NfsMount" +}, +"type": "array" +}, "rayLogsSpec": { "$ref": "GoogleCloudAiplatformV1beta1RayLogsSpec", "description": "Optional. OSS Ray logging configurations." @@ -38545,7 +38748,7 @@ false "id": "GoogleCloudAiplatformV1beta1ReasoningEngineSpec", "properties": { "classMethods": { -"description": "Optional. Declarations for object class methods.", +"description": "Optional. 
Declarations for object class methods in OpenAPI specification format.", "items": { "additionalProperties": { "description": "Properties of the object.", @@ -41793,6 +41996,14 @@ false }, "type": "array" }, +"infillPrefix": { +"description": "Preamble: For infill prompt, the prefix before expected model response.", +"type": "string" +}, +"infillSuffix": { +"description": "Preamble: For infill prompt, the suffix after expected model response.", +"type": "string" +}, "inputPrefixes": { "description": "Preamble: The input prefixes before each example input.", "items": { @@ -41807,6 +42018,13 @@ false }, "type": "array" }, +"predictionInputs": { +"description": "Preamble: The input test data for prediction. Each PartList in this field represents one text-only input set for a single model request.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1SchemaPromptSpecPartList" +}, +"type": "array" +}, "promptMessage": { "$ref": "GoogleCloudAiplatformV1beta1SchemaPromptSpecPromptMessage", "description": "The prompt message." @@ -44404,6 +44622,12 @@ false "properties": {}, "type": "object" }, +"GoogleCloudAiplatformV1beta1StopNotebookRuntimeRequest": { +"description": "Request message for NotebookService.StopNotebookRuntime.", +"id": "GoogleCloudAiplatformV1beta1StopNotebookRuntimeRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1StopTrialRequest": { "description": "Request message for VizierService.StopTrial.", "id": "GoogleCloudAiplatformV1beta1StopTrialRequest", @@ -46266,6 +46490,10 @@ false "description": "Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval).", "id": "GoogleCloudAiplatformV1beta1Tool", "properties": { +"codeExecution": { +"$ref": "GoogleCloudAiplatformV1beta1ToolCodeExecution", +"description": "Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services." +}, "functionDeclarations": { "description": "Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided.", "items": { @@ -46351,6 +46579,12 @@ false "properties": {}, "type": "object" }, +"GoogleCloudAiplatformV1beta1ToolCodeExecution": { +"description": "Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool.", +"id": "GoogleCloudAiplatformV1beta1ToolCodeExecution", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1ToolConfig": { "description": "Tool config. 
This config is shared for all tools provided in the request.", "id": "GoogleCloudAiplatformV1beta1ToolConfig", @@ -47164,6 +47398,17 @@ false }, "type": "object" }, +"GoogleCloudAiplatformV1beta1UpdateEndpointLongRunningRequest": { +"description": "Request message for EndpointService.UpdateEndpointLongRunning.", +"id": "GoogleCloudAiplatformV1beta1UpdateEndpointLongRunningRequest", +"properties": { +"endpoint": { +"$ref": "GoogleCloudAiplatformV1beta1Endpoint", +"description": "Required. The Endpoint which replaces the resource on the server. Currently we only support updating the `client_connection_config` field, all the other fields' update will be blocked." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1UpdateExplanationDatasetOperationMetadata": { "description": "Runtime operation information for ModelService.UpdateExplanationDataset.", "id": "GoogleCloudAiplatformV1beta1UpdateExplanationDatasetOperationMetadata", @@ -47403,6 +47648,7 @@ false "properties": { "ragFileChunkingConfig": { "$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"deprecated": true, "description": "Specifies the size and overlap of chunks after uploading RagFile." } }, @@ -47534,6 +47780,7 @@ false "type": "array" }, "similarityTopK": { +"deprecated": true, "description": "Optional. Number of top k results to return from the selected corpora.", "format": "int32", "type": "integer" From a8cfcf22cb51772aea978eac8bb80fa313bacc74 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 03/18] feat(checks): update the api #### checks:v1alpha The following keys were added: - resources.aisafety.methods.classifyContent (Total Keys: 7) - schemas.GoogleChecksAisafetyV1alphaClassifyContentRequest (Total Keys: 18) - schemas.GoogleChecksAisafetyV1alphaClassifyContentResponse (Total Keys: 10) - schemas.GoogleChecksAisafetyV1alphaTextInput (Total Keys: 4) --- docs/dyn/checks_v1alpha.aisafety.html | 135 ++++++++++++ docs/dyn/checks_v1alpha.html | 5 + .../documents/checks.v1alpha.json | 200 +++++++++++++++++- 3 files changed, 339 insertions(+), 1 deletion(-) create mode 100644 docs/dyn/checks_v1alpha.aisafety.html diff --git a/docs/dyn/checks_v1alpha.aisafety.html b/docs/dyn/checks_v1alpha.aisafety.html new file mode 100644 index 0000000000..44e3776468 --- /dev/null +++ b/docs/dyn/checks_v1alpha.aisafety.html @@ -0,0 +1,135 @@ + + + +

Checks API . aisafety

+

Instance Methods

+

+ classifyContent(body=None, x__xgafv=None)

+

Analyze a piece of content with the provided set of policies.

+

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ classifyContent(body=None, x__xgafv=None) +
Analyze a piece of content with the provided set of policies.
+
+Args:
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request proto for ClassifyContent RPC.
+  "classifierVersion": "A String", # Optional. Version of the classifier to use. If not specified, the latest version will be used.
+  "context": { # Context about the input that will be used to help on the classification. # Optional. Context about the input that will be used to help on the classification.
+    "prompt": "A String", # Optional. Prompt that generated the model response.
+  },
+  "input": { # Content to be classified. # Required. Content to be classified.
+    "textInput": { # Text input to be classified. # Content in text format.
+      "content": "A String", # Actual piece of text to be classified.
+      "languageCode": "A String", # Optional. Language of the text in ISO 639-1 format. If the language is invalid or not specified, the system will try to detect it.
+    },
+  },
+  "policies": [ # Required. List of policies to classify against.
+    { # List of policies to classify against.
+      "policyType": "A String", # Required. Type of the policy.
+      "threshold": 3.14, # Optional. Score threshold to use when deciding if the content is violative or non-violative. If not specified, the default 0.5 threshold for the policy will be used.
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response proto for ClassifyContent RPC.
+  "policyResults": [ # Results of the classification for each policy.
+    { # Result for one policy against the corresponding input.
+      "policyType": "A String", # Type of the policy.
+      "score": 3.14, # Final score for the results of this policy.
+      "violationResult": "A String", # Result of the classification for the policy.
+    },
+  ],
+}
+
+ +
+ close() +
Close httplib2 connections.
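For orientation, a minimal usage sketch of the new `aisafety.classifyContent` method as exposed through the generated client. The policy names, threshold and sample strings are illustrative only, the Checks API must be enabled for the credentials in use, and application default credentials are assumed:

from googleapiclient.discovery import build

checks = build("checks", "v1alpha")

request_body = {
    "input": {
        "textInput": {
            "content": "Model output to be screened.",
            "languageCode": "en",
        }
    },
    "context": {"prompt": "User prompt that produced the output."},
    "policies": [
        {"policyType": "DANGEROUS_CONTENT"},
        {"policyType": "HARASSMENT", "threshold": 0.7},
    ],
}

result = checks.aisafety().classifyContent(body=request_body).execute()
for policy_result in result.get("policyResults", []):
    print(policy_result["policyType"], policy_result["violationResult"], policy_result.get("score"))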
+
+ + \ No newline at end of file diff --git a/docs/dyn/checks_v1alpha.html b/docs/dyn/checks_v1alpha.html index 4bbcbf2502..5a5cbad0a3 100644 --- a/docs/dyn/checks_v1alpha.html +++ b/docs/dyn/checks_v1alpha.html @@ -79,6 +79,11 @@

Instance Methods

Returns the accounts Resource.

+

+ aisafety() +

+

Returns the aisafety Resource.

+

media()

diff --git a/googleapiclient/discovery_cache/documents/checks.v1alpha.json b/googleapiclient/discovery_cache/documents/checks.v1alpha.json index ceb624da8e..ff0ec385d7 100644 --- a/googleapiclient/discovery_cache/documents/checks.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/checks.v1alpha.json @@ -401,6 +401,25 @@ } } }, +"aisafety": { +"methods": { +"classifyContent": { +"description": "Analyze a piece of content with the provided set of policies.", +"flatPath": "v1alpha/aisafety:classifyContent", +"httpMethod": "POST", +"id": "checks.aisafety.classifyContent", +"parameterOrder": [], +"parameters": {}, +"path": "v1alpha/aisafety:classifyContent", +"request": { +"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequest" +}, +"response": { +"$ref": "GoogleChecksAisafetyV1alphaClassifyContentResponse" +} +} +} +}, "media": { "methods": { "upload": { @@ -444,7 +463,7 @@ } } }, -"revision": "20241025", +"revision": "20241029", "rootUrl": "https://checks.googleapis.com/", "schemas": { "CancelOperationRequest": { @@ -492,6 +511,185 @@ }, "type": "object" }, +"GoogleChecksAisafetyV1alphaClassifyContentRequest": { +"description": "Request proto for ClassifyContent RPC.", +"id": "GoogleChecksAisafetyV1alphaClassifyContentRequest", +"properties": { +"classifierVersion": { +"description": "Optional. Version of the classifier to use. If not specified, the latest version will be used.", +"enum": [ +"CLASSIFIER_VERSION_UNSPECIFIED", +"STABLE", +"LATEST" +], +"enumDescriptions": [ +"Unspecified version.", +"Stable version.", +"Latest version." +], +"type": "string" +}, +"context": { +"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestContext", +"description": "Optional. Context about the input that will be used to help on the classification." +}, +"input": { +"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent", +"description": "Required. Content to be classified." +}, +"policies": { +"description": "Required. List of policies to classify against.", +"items": { +"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleChecksAisafetyV1alphaClassifyContentRequestContext": { +"description": "Context about the input that will be used to help on the classification.", +"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestContext", +"properties": { +"prompt": { +"description": "Optional. Prompt that generated the model response.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent": { +"description": "Content to be classified.", +"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent", +"properties": { +"textInput": { +"$ref": "GoogleChecksAisafetyV1alphaTextInput", +"description": "Content in text format." +} +}, +"type": "object" +}, +"GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig": { +"description": "List of policies to classify against.", +"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig", +"properties": { +"policyType": { +"description": "Required. 
Type of the policy.", +"enum": [ +"POLICY_TYPE_UNSPECIFIED", +"DANGEROUS_CONTENT", +"PII_SOLICITING_RECITING", +"HARASSMENT", +"SEXUALLY_EXPLICIT", +"HATE_SPEECH", +"MEDICAL_INFO", +"VIOLENCE_AND_GORE", +"OBSCENITY_AND_PROFANITY" +], +"enumDescriptions": [ +"Default.", +"The model facilitates, promotes or enables access to harmful goods, services, and activities.", +"The model reveals an individual\u2019s personal information and data.", +"The model generates content that is malicious, intimidating, bullying, or abusive towards another individual.", +"The model generates content that is sexually explicit in nature.", +"The model promotes violence, hatred, discrimination on the basis of race, religion, etc.", +"The model facilitates harm by providing health advice or guidance.", +"The model generates content that contains gratuitous, realistic descriptions of violence or gore.", +"" +], +"type": "string" +}, +"threshold": { +"description": "Optional. Score threshold to use when deciding if the content is violative or non-violative. If not specified, the default 0.5 threshold for the policy will be used.", +"format": "float", +"type": "number" +} +}, +"type": "object" +}, +"GoogleChecksAisafetyV1alphaClassifyContentResponse": { +"description": "Response proto for ClassifyContent RPC.", +"id": "GoogleChecksAisafetyV1alphaClassifyContentResponse", +"properties": { +"policyResults": { +"description": "Results of the classification for each policy.", +"items": { +"$ref": "GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult": { +"description": "Result for one policy against the corresponding input.", +"id": "GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult", +"properties": { +"policyType": { +"description": "Type of the policy.", +"enum": [ +"POLICY_TYPE_UNSPECIFIED", +"DANGEROUS_CONTENT", +"PII_SOLICITING_RECITING", +"HARASSMENT", +"SEXUALLY_EXPLICIT", +"HATE_SPEECH", +"MEDICAL_INFO", +"VIOLENCE_AND_GORE", +"OBSCENITY_AND_PROFANITY" +], +"enumDescriptions": [ +"Default.", +"The model facilitates, promotes or enables access to harmful goods, services, and activities.", +"The model reveals an individual\u2019s personal information and data.", +"The model generates content that is malicious, intimidating, bullying, or abusive towards another individual.", +"The model generates content that is sexually explicit in nature.", +"The model promotes violence, hatred, discrimination on the basis of race, religion, etc.", +"The model facilitates harm by providing health advice or guidance.", +"The model generates content that contains gratuitous, realistic descriptions of violence or gore.", +"" +], +"type": "string" +}, +"score": { +"description": "Final score for the results of this policy.", +"format": "float", +"type": "number" +}, +"violationResult": { +"description": "Result of the classification for the policy.", +"enum": [ +"VIOLATION_RESULT_UNSPECIFIED", +"VIOLATIVE", +"NON_VIOLATIVE", +"CLASSIFICATION_ERROR" +], +"enumDescriptions": [ +"Unspecified result.", +"The final score is greater or equal the input score threshold.", +"The final score is smaller than the input score threshold.", +"There was an error and the violation result could not be determined." 
+], +"type": "string" +} +}, +"type": "object" +}, +"GoogleChecksAisafetyV1alphaTextInput": { +"description": "Text input to be classified.", +"id": "GoogleChecksAisafetyV1alphaTextInput", +"properties": { +"content": { +"description": "Actual piece of text to be classified.", +"type": "string" +}, +"languageCode": { +"description": "Optional. Language of the text in ISO 639-1 format. If the language is invalid or not specified, the system will try to detect it.", +"type": "string" +} +}, +"type": "object" +}, "GoogleChecksReportV1alphaAnalyzeUploadRequest": { "description": "The request message for ReportService.AnalyzeUpload.", "id": "GoogleChecksReportV1alphaAnalyzeUploadRequest", From 53a22f02b587146f16592a0a7508505487f4dedf Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 04/18] feat(cloudbuild): update the api #### cloudbuild:v1 The following keys were added: - schemas.PrivatePoolV1Config.properties.privateServiceConnect.$ref (Total Keys: 1) - schemas.PrivateServiceConnect (Total Keys: 5) --- ...ild_v1.projects.locations.workerPools.html | 20 +++++++++++++++ .../documents/cloudbuild.v1.json | 25 ++++++++++++++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html b/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html index b7b97f4f70..dbd9952cab 100644 --- a/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html +++ b/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html @@ -125,6 +125,11 @@

Method Details

"peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/build/docs/private-pools/set-up-private-pool-environment) "peeredNetworkIpRange": "A String", # Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. }, + "privateServiceConnect": { # Defines the Private Service Connect network configuration for the pool. # Immutable. Private Service Connect(PSC) Network configuration for the pool. + "networkAttachment": "A String", # Required. Immutable. The network attachment that the worker network interface is peered to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments) + "publicIpAddressDisabled": True or False, # Required. Immutable. Disable public IP on the primary network interface. If true, workers are created without any public address, which prevents network egress to public IPs unless a network proxy is configured. If false, workers are created with a public address which allows for public internet egress. The public address only applies to traffic through the primary network interface. If `route_all_traffic` is set to true, all traffic will go through the non-primary network interface, this boolean has no effect. + "routeAllTraffic": True or False, # Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface. + }, "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size. "machineType": "A String", # Optional. Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default. @@ -233,6 +238,11 @@

Method Details

"peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/build/docs/private-pools/set-up-private-pool-environment) "peeredNetworkIpRange": "A String", # Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. }, + "privateServiceConnect": { # Defines the Private Service Connect network configuration for the pool. # Immutable. Private Service Connect(PSC) Network configuration for the pool. + "networkAttachment": "A String", # Required. Immutable. The network attachment that the worker network interface is peered to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments) + "publicIpAddressDisabled": True or False, # Required. Immutable. Disable public IP on the primary network interface. If true, workers are created without any public address, which prevents network egress to public IPs unless a network proxy is configured. If false, workers are created with a public address which allows for public internet egress. The public address only applies to traffic through the primary network interface. If `route_all_traffic` is set to true, all traffic will go through the non-primary network interface, this boolean has no effect. + "routeAllTraffic": True or False, # Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface. + }, "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size. "machineType": "A String", # Optional. Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default. @@ -278,6 +288,11 @@

Method Details

"peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/build/docs/private-pools/set-up-private-pool-environment) "peeredNetworkIpRange": "A String", # Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. }, + "privateServiceConnect": { # Defines the Private Service Connect network configuration for the pool. # Immutable. Private Service Connect(PSC) Network configuration for the pool. + "networkAttachment": "A String", # Required. Immutable. The network attachment that the worker network interface is peered to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments) + "publicIpAddressDisabled": True or False, # Required. Immutable. Disable public IP on the primary network interface. If true, workers are created without any public address, which prevents network egress to public IPs unless a network proxy is configured. If false, workers are created with a public address which allows for public internet egress. The public address only applies to traffic through the primary network interface. If `route_all_traffic` is set to true, all traffic will go through the non-primary network interface, this boolean has no effect. + "routeAllTraffic": True or False, # Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface. + }, "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size. "machineType": "A String", # Optional. Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default. @@ -329,6 +344,11 @@

Method Details

"peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/build/docs/private-pools/set-up-private-pool-environment) "peeredNetworkIpRange": "A String", # Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. }, + "privateServiceConnect": { # Defines the Private Service Connect network configuration for the pool. # Immutable. Private Service Connect(PSC) Network configuration for the pool. + "networkAttachment": "A String", # Required. Immutable. The network attachment that the worker network interface is peered to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments) + "publicIpAddressDisabled": True or False, # Required. Immutable. Disable public IP on the primary network interface. If true, workers are created without any public address, which prevents network egress to public IPs unless a network proxy is configured. If false, workers are created with a public address which allows for public internet egress. The public address only applies to traffic through the primary network interface. If `route_all_traffic` is set to true, all traffic will go through the non-primary network interface, this boolean has no effect. + "routeAllTraffic": True or False, # Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface. + }, "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size. "machineType": "A String", # Optional. Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default. 
diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json index eb881b2dab..9701a72d1a 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json @@ -2346,7 +2346,7 @@ } } }, -"revision": "20240923", +"revision": "20241025", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ApprovalConfig": { @@ -4631,6 +4631,10 @@ false "$ref": "NetworkConfig", "description": "Network configuration for the pool." }, +"privateServiceConnect": { +"$ref": "PrivateServiceConnect", +"description": "Immutable. Private Service Connect(PSC) Network configuration for the pool." +}, "workerConfig": { "$ref": "WorkerConfig", "description": "Machine configuration for the workers in the pool." @@ -4638,6 +4642,25 @@ false }, "type": "object" }, +"PrivateServiceConnect": { +"description": "Defines the Private Service Connect network configuration for the pool.", +"id": "PrivateServiceConnect", +"properties": { +"networkAttachment": { +"description": "Required. Immutable. The network attachment that the worker network interface is peered to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)", +"type": "string" +}, +"publicIpAddressDisabled": { +"description": "Required. Immutable. Disable public IP on the primary network interface. If true, workers are created without any public address, which prevents network egress to public IPs unless a network proxy is configured. If false, workers are created with a public address which allows for public internet egress. The public address only applies to traffic through the primary network interface. If `route_all_traffic` is set to true, all traffic will go through the non-primary network interface, this boolean has no effect.", +"type": "boolean" +}, +"routeAllTraffic": { +"description": "Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 
10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.", +"type": "boolean" +} +}, +"type": "object" +}, "ProcessAppManifestCallbackOperationMetadata": { "description": "Metadata for `ProcessAppManifestCallback` operation.", "id": "ProcessAppManifestCallbackOperationMetadata", From 8fe597511f9d281d1d7a1f8ff512dee545d9014c Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 05/18] feat(compute): update the api #### compute:alpha The following keys were added: - schemas.NetworkProfile.properties.location.$ref (Total Keys: 1) - schemas.NetworkProfileLocation (Total Keys: 4) #### compute:beta The following keys were added: - schemas.NetworkProfile.properties.location.$ref (Total Keys: 1) - schemas.NetworkProfileLocation (Total Keys: 4) - schemas.ResourceStatus.properties.physicalHostTopology.$ref (Total Keys: 1) - schemas.ResourceStatusPhysicalHostTopology (Total Keys: 6) --- docs/dyn/compute_alpha.backendBuckets.html | 12 ++-- docs/dyn/compute_alpha.backendServices.html | 14 ++-- docs/dyn/compute_alpha.instanceTemplates.html | 16 ++--- docs/dyn/compute_alpha.instances.html | 44 ++++++------ docs/dyn/compute_alpha.machineImages.html | 24 +++---- docs/dyn/compute_alpha.networkProfiles.html | 8 +++ .../compute_alpha.regionBackendServices.html | 12 ++-- ...compute_alpha.regionInstanceTemplates.html | 12 ++-- docs/dyn/compute_alpha.regionInstances.html | 4 +- docs/dyn/compute_alpha.subnetworks.html | 12 ++-- .../compute_alpha.zoneQueuedResources.html | 16 ++--- docs/dyn/compute_beta.backendBuckets.html | 10 +-- docs/dyn/compute_beta.backendServices.html | 14 ++-- docs/dyn/compute_beta.instances.html | 32 ++++++++- docs/dyn/compute_beta.networkProfiles.html | 8 +++ .../compute_beta.regionBackendServices.html | 12 ++-- docs/dyn/compute_beta.subnetworks.html | 12 ++-- .../documents/compute.alpha.json | 54 ++++++++++++--- .../documents/compute.beta.json | 67 +++++++++++++++++-- 19 files changed, 258 insertions(+), 125 deletions(-) diff --git a/docs/dyn/compute_alpha.backendBuckets.html b/docs/dyn/compute_alpha.backendBuckets.html index 88a6a7a7e4..8c56cf7a7e 100644 --- a/docs/dyn/compute_alpha.backendBuckets.html +++ b/docs/dyn/compute_alpha.backendBuckets.html @@ -543,7 +543,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -657,7 +657,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -852,7 +852,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -945,7 +945,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1054,7 +1054,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1500,7 +1500,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. diff --git a/docs/dyn/compute_alpha.backendServices.html b/docs/dyn/compute_alpha.backendServices.html index 560e990a5b..2842d56702 100644 --- a/docs/dyn/compute_alpha.backendServices.html +++ b/docs/dyn/compute_alpha.backendServices.html @@ -343,7 +343,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1135,7 +1135,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1717,7 +1717,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2333,7 +2333,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2847,7 +2847,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -3377,7 +3377,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -4376,7 +4376,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. diff --git a/docs/dyn/compute_alpha.instanceTemplates.html b/docs/dyn/compute_alpha.instanceTemplates.html index 5d18d2a3d8..6c01d844c8 100644 --- a/docs/dyn/compute_alpha.instanceTemplates.html +++ b/docs/dyn/compute_alpha.instanceTemplates.html @@ -347,7 +347,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -359,7 +359,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -893,7 +893,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -905,7 +905,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -1306,7 +1306,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1318,7 +1318,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -1805,7 +1805,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1817,7 +1817,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. diff --git a/docs/dyn/compute_alpha.instances.html b/docs/dyn/compute_alpha.instances.html index 90bc0b6ab3..80f7340274 100644 --- a/docs/dyn/compute_alpha.instances.html +++ b/docs/dyn/compute_alpha.instances.html @@ -79,7 +79,7 @@

Instance Methods

Adds an access config to an instance's network interface.

addNetworkInterface(project, zone, instance, body=None, requestId=None, x__xgafv=None)

- Adds a network interface to an instance.
+ Adds one dynamic network interface to an active instance.

addResourcePolicies(project, zone, instance, body=None, requestId=None, x__xgafv=None)

Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.

@@ -106,7 +106,7 @@

Instance Methods

Deletes an access config from an instance's network interface.

deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)

- Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.
+ Deletes one dynamic network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - dynamic network interface to be deleted, using network_interface_name field;

detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)

Detaches a disk from an instance.

@@ -402,7 +402,7 @@

Method Details

addNetworkInterface(project, zone, instance, body=None, requestId=None, x__xgafv=None)
- Adds a network interface to an instance.
+ Adds one dynamic network interface to an active instance.
 
 Args:
   project: string, Project ID for this request. (required)
@@ -461,7 +461,7 @@ 

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -473,7 +473,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). @@ -982,7 +982,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -994,7 +994,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { @@ -1725,7 +1725,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1737,7 +1737,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -2255,13 +2255,13 @@

Method Details

deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)
- Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.
+ Deletes one dynamic network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - dynamic network interface to be deleted, using network_interface_name field;
 
 Args:
   project: string, Project ID for this request. (required)
   zone: string, The name of the zone for this request. (required)
   instance: string, The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer. (required)
-  networkInterfaceName: string, The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported. (required)
+  networkInterfaceName: string, The name of the dynamic network interface to be deleted from the instance. (required)
   requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).
   x__xgafv: string, V1 error format.
     Allowed values
@@ -2755,7 +2755,7 @@ 
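And the matching teardown, sketched under the same placeholder names: deleteNetworkInterface identifies the dynamic interface purely by networkInterfaceName, which here is a hypothetical value.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'alpha')

    # Detach a dynamic network interface from an active instance by name.
    operation = compute.instances().deleteNetworkInterface(
        project='my-project',
        zone='us-central1-a',
        instance='example-instance',
        networkInterfaceName='example-dynamic-nic',   # placeholder name
    ).execute()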

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -2767,7 +2767,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { @@ -3475,7 +3475,7 @@

Method Details

zone: string, The name of the zone for this request. (required) instance: string, Name of the instance for this request. (required) port: integer, Specifies which COM or serial port to retrieve data from. - start: string, Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console. + start: string, Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console. Note that the negative start is bounded by the retained buffer size, and the returned serial console output will not exceed the max buffer size. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3796,7 +3796,7 @@
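The negative-start behaviour described above is easiest to see in a short sketch; names are placeholders and the -4096 value simply asks for the last 4 KiB retained in the 1 MB buffer.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'alpha')

    # Fetch only the most recent serial-console output. A negative `start`
    # counts back from the end of the retained buffer, bounded by its 1 MB size.
    output = compute.instances().getSerialPortOutput(
        project='my-project',
        zone='us-central1-a',
        instance='example-instance',
        port=1,
        start='-4096',   # last 4 KiB written to the serial console
    ).execute()
    print(output.get('contents', ''))
    print('next start offset:', output.get('next'))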

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -3808,7 +3808,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { @@ -4407,7 +4407,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -4419,7 +4419,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { @@ -8518,7 +8518,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -8530,7 +8530,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { @@ -9225,7 +9225,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -9237,7 +9237,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). diff --git a/docs/dyn/compute_alpha.machineImages.html b/docs/dyn/compute_alpha.machineImages.html index d0818cb7b9..e487688ea0 100644 --- a/docs/dyn/compute_alpha.machineImages.html +++ b/docs/dyn/compute_alpha.machineImages.html @@ -459,7 +459,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -471,7 +471,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -728,7 +728,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -740,7 +740,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "postKeyRevocationActionType": "A String", # PostKeyRevocationActionType of the instance. @@ -1086,7 +1086,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1098,7 +1098,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -1355,7 +1355,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1367,7 +1367,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "postKeyRevocationActionType": "A String", # PostKeyRevocationActionType of the instance. @@ -1795,7 +1795,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1807,7 +1807,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -2064,7 +2064,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -2076,7 +2076,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "postKeyRevocationActionType": "A String", # PostKeyRevocationActionType of the instance. diff --git a/docs/dyn/compute_alpha.networkProfiles.html b/docs/dyn/compute_alpha.networkProfiles.html index 2622a3f77c..978f87aee8 100644 --- a/docs/dyn/compute_alpha.networkProfiles.html +++ b/docs/dyn/compute_alpha.networkProfiles.html @@ -144,6 +144,10 @@

Method Details

}, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#networkProfile", # [Output Only] Type of the resource. Always compute#networkProfile for network profiles. + "location": { # [Output Only] Location to which the network is restricted. + "name": "A String", + "scope": "A String", + }, "name": "A String", # [Output Only] Name of the resource. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. @@ -211,6 +215,10 @@

Method Details

}, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#networkProfile", # [Output Only] Type of the resource. Always compute#networkProfile for network profiles. + "location": { # [Output Only] Location to which the network is restricted. + "name": "A String", + "scope": "A String", + }, "name": "A String", # [Output Only] Name of the resource. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. diff --git a/docs/dyn/compute_alpha.regionBackendServices.html b/docs/dyn/compute_alpha.regionBackendServices.html index 2333f1804c..21f40ee95f 100644 --- a/docs/dyn/compute_alpha.regionBackendServices.html +++ b/docs/dyn/compute_alpha.regionBackendServices.html @@ -316,7 +316,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -901,7 +901,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1518,7 +1518,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2033,7 +2033,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2564,7 +2564,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -3435,7 +3435,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. diff --git a/docs/dyn/compute_alpha.regionInstanceTemplates.html b/docs/dyn/compute_alpha.regionInstanceTemplates.html index fac2c6a728..6ab06f96d7 100644 --- a/docs/dyn/compute_alpha.regionInstanceTemplates.html +++ b/docs/dyn/compute_alpha.regionInstanceTemplates.html @@ -458,7 +458,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -470,7 +470,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -823,7 +823,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -835,7 +835,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -1323,7 +1323,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1335,7 +1335,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. diff --git a/docs/dyn/compute_alpha.regionInstances.html b/docs/dyn/compute_alpha.regionInstances.html index a368ff1750..0d57fbf0e5 100644 --- a/docs/dyn/compute_alpha.regionInstances.html +++ b/docs/dyn/compute_alpha.regionInstances.html @@ -300,7 +300,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -312,7 +312,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. diff --git a/docs/dyn/compute_alpha.subnetworks.html b/docs/dyn/compute_alpha.subnetworks.html index 780bfc4076..97b04720f1 100644 --- a/docs/dyn/compute_alpha.subnetworks.html +++ b/docs/dyn/compute_alpha.subnetworks.html @@ -184,7 +184,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -564,7 +564,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -680,7 +680,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -876,7 +876,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -941,7 +941,7 @@

Method Details

"ipCidrRange": "A String", # The range of internal addresses that are owned by this subnetwork. "ipv6AccessType": "A String", # The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation or the first time the subnet is updated into IPV4_IPV6 dual stack. "network": "A String", # Network URL. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. "secondaryIpRanges": [ # Secondary IP ranges. { # Secondary IP range of a usable subnetwork. @@ -1061,7 +1061,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. diff --git a/docs/dyn/compute_alpha.zoneQueuedResources.html b/docs/dyn/compute_alpha.zoneQueuedResources.html index 47a36ba0d9..99de6e7476 100644 --- a/docs/dyn/compute_alpha.zoneQueuedResources.html +++ b/docs/dyn/compute_alpha.zoneQueuedResources.html @@ -338,7 +338,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -350,7 +350,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -1089,7 +1089,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1101,7 +1101,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -1533,7 +1533,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -1545,7 +1545,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. @@ -2107,7 +2107,7 @@

Method Details

"networkAttachment": "A String", # The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. "networkIP": "A String", # An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. "nicType": "A String", # The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. - "parentNicName": "A String", # Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set. + "parentNicName": "A String", # Name of the parent network interface of a dynamic network interface. "queueCount": 42, # The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. "stackType": "A String", # The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. "subinterfaces": [ # SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. @@ -2119,7 +2119,7 @@

Method Details

}, ], "subnetwork": "A String", # The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork - "vlan": 42, # VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set. + "vlan": 42, # VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively. }, ], "networkPerformanceConfig": { # Note that for MachineImage, this is not supported yet. diff --git a/docs/dyn/compute_beta.backendBuckets.html b/docs/dyn/compute_beta.backendBuckets.html index 5541d9c903..511aebf133 100644 --- a/docs/dyn/compute_beta.backendBuckets.html +++ b/docs/dyn/compute_beta.backendBuckets.html @@ -534,7 +534,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -647,7 +647,7 @@
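The hunk above documents that cdnPolicy.cacheMode now defaults to CACHE_ALL_STATIC when left unset. A minimal sketch of creating a CDN-enabled backend bucket with explicit cache settings through the beta client follows; the project, resource, and bucket names are placeholders and the TTL values are illustrative only.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')

    body = {
        'name': 'example-backend-bucket',     # placeholder resource name
        'bucketName': 'example-gcs-bucket',   # placeholder Cloud Storage bucket
        'enableCdn': True,
        'cdnPolicy': {
            # Omitting cacheMode would now default to CACHE_ALL_STATIC per the diff above.
            'cacheMode': 'CACHE_ALL_STATIC',
            'clientTtl': 3600,    # clamp the client-facing max-age to 1 hour
            'defaultTtl': 3600,   # used when the origin response carries no TTL
            'maxTtl': 86400,      # cap origin-supplied TTLs at 1 day
        },
    }

    # Insert the backend bucket; 'my-project' is a placeholder project ID.
    compute.backendBuckets().insert(project='my-project', body=body).execute()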

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -840,7 +840,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -934,7 +934,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1377,7 +1377,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. diff --git a/docs/dyn/compute_beta.backendServices.html b/docs/dyn/compute_beta.backendServices.html index 461645f599..f7c55d298f 100644 --- a/docs/dyn/compute_beta.backendServices.html +++ b/docs/dyn/compute_beta.backendServices.html @@ -342,7 +342,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -885,7 +885,7 @@
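The same cacheMode default applies to backend services. As a hedged sketch, an existing service could be switched to USE_ORIGIN_HEADERS (so only responses with valid caching headers are cached at the edge) with a patch call like the one below; the project and service names are placeholders.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')

    # Only the cdnPolicy.cacheMode field is being changed here.
    patch_body = {'cdnPolicy': {'cacheMode': 'USE_ORIGIN_HEADERS'}}
    compute.backendServices().patch(
        project='my-project',              # placeholder project ID
        backendService='example-service',  # placeholder backend service name
        body=patch_body,
    ).execute()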

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1220,7 +1220,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1588,7 +1588,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1855,7 +1855,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2138,7 +2138,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2887,7 +2887,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. diff --git a/docs/dyn/compute_beta.instances.html b/docs/dyn/compute_beta.instances.html index 9893de04f3..13546884bd 100644 --- a/docs/dyn/compute_beta.instances.html +++ b/docs/dyn/compute_beta.instances.html @@ -798,6 +798,12 @@

], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. + "physicalHostTopology": { # Represents the physical host topology of the host on which the VM is running. # [Output Only] The physical host topology of the host on which the VM is running. + "block": "A String", + "cluster": "A String", + "host": "A String", + "subblock": "A String", + }, "scheduling": { "availabilityDomain": 42, # Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. "terminationTimestamp": "A String", # Time in future when the instance will be terminated in RFC3339 text format. @@ -2265,6 +2271,12 @@
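The new resourceStatus.physicalHostTopology block is output-only, so a caller reads it from an instances().get() response rather than setting it. A minimal sketch under that assumption; the project, zone, and instance names are placeholders.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')

    instance = compute.instances().get(
        project='my-project', zone='us-central1-a', instance='example-vm'  # placeholders
    ).execute()

    # Output-only physical host topology; the sub-dict may be absent if not populated.
    topology = instance.get('resourceStatus', {}).get('physicalHostTopology', {})
    print(topology.get('cluster'), topology.get('block'),
          topology.get('subblock'), topology.get('host'))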

], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. + "physicalHostTopology": { # Represents the physical host topology of the host on which the VM is running. # [Output Only] The physical host topology of the host on which the VM is running. + "block": "A String", + "cluster": "A String", + "host": "A String", + "subblock": "A String", + }, "scheduling": { "availabilityDomain": 42, # Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. "terminationTimestamp": "A String", # Time in future when the instance will be terminated in RFC3339 text format. @@ -2828,7 +2840,7 @@

zone: string, The name of the zone for this request. (required) instance: string, Name of the instance for this request. (required) port: integer, Specifies which COM or serial port to retrieve data from. - start: string, Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console. + start: string, Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console. Note that the negative start is bounded by the retained buffer size, and the returned serial console output will not exceed the max buffer size. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3164,6 +3176,12 @@
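The updated start parameter text clarifies that a negative value is bounded by the retained 1 MB buffer. As an illustrative sketch, the most recent serial console output can be tailed as below; the project, zone, and instance names are placeholders.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')

    # Request roughly the last 4 KiB written to COM1; if less output is retained,
    # the service adjusts `start` and returns whatever is still available.
    output = compute.instances().getSerialPortOutput(
        project='my-project', zone='us-central1-a', instance='example-vm',  # placeholders
        port=1, start=-4096,
    ).execute()
    print(output.get('contents', ''))
    print('next start offset:', output.get('next'))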

], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. + "physicalHostTopology": { # Represents the physical host topology of the host on which the VM is running. # [Output Only] The physical host topology of the host on which the VM is running. + "block": "A String", + "cluster": "A String", + "host": "A String", + "subblock": "A String", + }, "scheduling": { "availabilityDomain": 42, # Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. "terminationTimestamp": "A String", # Time in future when the instance will be terminated in RFC3339 text format. @@ -3655,6 +3673,12 @@

], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. + "physicalHostTopology": { # Represents the physical host topology of the host on which the VM is running. # [Output Only] The physical host topology of the host on which the VM is running. + "block": "A String", + "cluster": "A String", + "host": "A String", + "subblock": "A String", + }, "scheduling": { "availabilityDomain": 42, # Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. "terminationTimestamp": "A String", # Time in future when the instance will be terminated in RFC3339 text format. @@ -7461,6 +7485,12 @@

], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. + "physicalHostTopology": { # Represents the physical host topology of the host on which the VM is running. # [Output Only] The physical host topology of the host on which the VM is running. + "block": "A String", + "cluster": "A String", + "host": "A String", + "subblock": "A String", + }, "scheduling": { "availabilityDomain": 42, # Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. "terminationTimestamp": "A String", # Time in future when the instance will be terminated in RFC3339 text format. diff --git a/docs/dyn/compute_beta.networkProfiles.html b/docs/dyn/compute_beta.networkProfiles.html index 8d0df9e050..5d00eafafd 100644 --- a/docs/dyn/compute_beta.networkProfiles.html +++ b/docs/dyn/compute_beta.networkProfiles.html @@ -144,6 +144,10 @@

}, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#networkProfile", # [Output Only] Type of the resource. Always compute#networkProfile for network profiles. + "location": { # [Output Only] Location to which the network is restricted. + "name": "A String", + "scope": "A String", + }, "name": "A String", # [Output Only] Name of the resource. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. @@ -211,6 +215,10 @@
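The new location block on network profiles is likewise output-only and can be read with a plain get call. A hedged sketch; the project ID and profile name are placeholders.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')

    profile = compute.networkProfiles().get(
        project='my-project',                      # placeholder project ID
        networkProfile='example-network-profile',  # placeholder profile name
    ).execute()

    # Output-only: location to which networks using this profile are restricted.
    loc = profile.get('location', {})
    print(loc.get('name'), loc.get('scope'))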

}, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#networkProfile", # [Output Only] Type of the resource. Always compute#networkProfile for network profiles. + "location": { # [Output Only] Location to which the network is restricted. + "name": "A String", + "scope": "A String", + }, "name": "A String", # [Output Only] Name of the resource. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. diff --git a/docs/dyn/compute_beta.regionBackendServices.html b/docs/dyn/compute_beta.regionBackendServices.html index 2ddb6e1907..15408c778a 100644 --- a/docs/dyn/compute_beta.regionBackendServices.html +++ b/docs/dyn/compute_beta.regionBackendServices.html @@ -315,7 +315,7 @@

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -653,7 +653,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1022,7 +1022,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1290,7 +1290,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -1574,7 +1574,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. @@ -2196,7 +2196,7 @@

Method Details

"A String", ], }, - "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. + "cacheMode": "A String", # Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC. "clientTtl": 42, # Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). "defaultTtl": 42, # Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. "maxTtl": 42, # Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. diff --git a/docs/dyn/compute_beta.subnetworks.html b/docs/dyn/compute_beta.subnetworks.html index 2726d98b43..6ab23a1d1c 100644 --- a/docs/dyn/compute_beta.subnetworks.html +++ b/docs/dyn/compute_beta.subnetworks.html @@ -177,7 +177,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -544,7 +544,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -649,7 +649,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -833,7 +833,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. @@ -894,7 +894,7 @@

Method Details

"ipCidrRange": "A String", # The range of internal addresses that are owned by this subnetwork. "ipv6AccessType": "A String", # The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation or the first time the subnet is updated into IPV4_IPV6 dual stack. "network": "A String", # Network URL. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. "secondaryIpRanges": [ # Secondary IP ranges. { # Secondary IP range of a usable subnetwork. @@ -989,7 +989,7 @@

Method Details

"network": "A String", # The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. This field can be set only at resource creation time. "privateIpGoogleAccess": True or False, # Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess. "privateIpv6GoogleAccess": "A String", # This field is for internal use. This field can be both set at resource creation time and updated using patch. - "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. + "purpose": "A String", # The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. "region": "A String", # URL of the region where the Subnetwork resides. This field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. "role": "A String", # The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request. diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json index 963febab98..1e1e97d1db 100644 --- a/googleapiclient/discovery_cache/documents/compute.alpha.json +++ b/googleapiclient/discovery_cache/documents/compute.alpha.json @@ -11621,7 +11621,7 @@ ] }, "addNetworkInterface": { -"description": "Adds a network interface to an instance.", +"description": "Adds one dynamic network interface to an active instance.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", "httpMethod": "POST", "id": "compute.instances.addNetworkInterface", @@ -11996,7 +11996,7 @@ ] }, "deleteNetworkInterface": { -"description": "Deletes one network interface from an active instance. 
InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.", +"description": "Deletes one dynamic network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - dynamic network interface to be deleted, using network_interface_name field;", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/deleteNetworkInterface", "httpMethod": "POST", "id": "compute.instances.deleteNetworkInterface", @@ -12014,7 +12014,7 @@ "type": "string" }, "networkInterfaceName": { -"description": "The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported.", +"description": "The name of the dynamic network interface to be deleted from the instance.", "location": "query", "required": true, "type": "string" @@ -12438,7 +12438,7 @@ "type": "string" }, "start": { -"description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", +"description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console. Note that the negative start is bounded by the retained buffer size, and the returned serial console output will not exceed the max buffer size.", "format": "int64", "location": "query", "type": "string" @@ -47179,7 +47179,7 @@ } } }, -"revision": "20241015", +"revision": "20241021", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -50161,7 +50161,7 @@ false "description": "The CacheKeyPolicy for this CdnPolicy." }, "cacheMode": { -"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. 
Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", +"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.", "enum": [ "CACHE_ALL_STATIC", "FORCE_CACHE_ALL", @@ -51152,7 +51152,7 @@ false "description": "The CacheKeyPolicy for this CdnPolicy." }, "cacheMode": { -"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", +"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). 
Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.", "enum": [ "CACHE_ALL_STATIC", "FORCE_CACHE_ALL", @@ -75595,19 +75595,23 @@ false "enum": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "UNSPECIFIED_NIC_TYPE", "VIRTIO_NET" ], "enumDescriptions": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "No type specified.", "VIRTIO" ], "type": "string" }, "parentNicName": { -"description": "Name of the parent network interface of a VLAN based nic. If this field is specified, vlan must be set.", +"description": "Name of the parent network interface of a dynamic network interface.", "type": "string" }, "queueCount": { @@ -75641,7 +75645,7 @@ false "type": "string" }, "vlan": { -"description": "VLAN tag of a VLAN based network interface, must be in range from 2 to 4094 inclusively. This field is mandatory if the parent network interface name is set.", +"description": "VLAN tag of a dynamic network interface, must be in range from 2 to 4094 inclusively.", "format": "int32", "type": "integer" } @@ -75962,6 +75966,10 @@ false "description": "[Output Only] Type of the resource. Always compute#networkProfile for network profiles.", "type": "string" }, +"location": { +"$ref": "NetworkProfileLocation", +"description": "[Output Only] Location to which the network is restricted." +}, "name": { "description": "[Output Only] Name of the resource.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -75982,6 +75990,26 @@ false }, "type": "object" }, +"NetworkProfileLocation": { +"id": "NetworkProfileLocation", +"properties": { +"name": { +"type": "string" +}, +"scope": { +"enum": [ +"REGION", +"ZONE" +], +"enumDescriptions": [ +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, "NetworkProfileNetworkFeatures": { "id": "NetworkProfileNetworkFeatures", "properties": { @@ -76224,12 +76252,16 @@ false "enum": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "UNSPECIFIED_NIC_TYPE", "VIRTIO_NET" ], "enumDescriptions": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "No type specified.", "VIRTIO" ], @@ -97266,7 +97298,7 @@ false "type": "string" }, "purpose": { -"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", +"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. 
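The NetworkProfile schema above gains an output-only location (a name plus a REGION or ZONE scope). A sketch that reads it back, assuming the networkProfiles resource is available on the beta surface; the project and profile names are placeholders.

from googleapiclient import discovery

compute = discovery.build("compute", "beta")

profile = compute.networkProfiles().get(
    project="my-project", networkProfile="my-network-profile"  # placeholders
).execute()

# location is output-only; scope is REGION or ZONE per the new schema.
loc = profile.get("location", {})
print(profile["name"], loc.get("name"), loc.get("scope"))
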
The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", "enum": [ "AGGREGATE", "CLOUD_EXTENSION", @@ -102923,7 +102955,7 @@ false "type": "string" }, "purpose": { -"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", +"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", "enum": [ "AGGREGATE", "CLOUD_EXTENSION", diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index 2b62efb3b5..b7de59f0cf 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -11842,7 +11842,7 @@ "type": "string" }, "start": { -"description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", +"description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. 
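The getSerialPortOutput start parameter documented in this hunk now notes that a negative start is bounded by the retained 1 MB buffer. A small sketch tailing the most recent serial console output (project, zone, and instance names are placeholders):

from googleapiclient import discovery

compute = discovery.build("compute", "beta")

# start=-4096 requests at most the most recent 4 KiB still retained in the
# 1 MB serial console buffer; the response echoes the (possibly adjusted) start.
out = compute.instances().getSerialPortOutput(
    project="my-project", zone="us-central1-a", instance="my-instance",
    port=1, start=-4096,
).execute()
print(out["start"], out["next"])
print(out["contents"])
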
If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console. Note that the negative start is bounded by the retained buffer size, and the returned serial console output will not exceed the max buffer size.", "format": "int64", "location": "query", "type": "string" @@ -42695,7 +42695,7 @@ } } }, -"revision": "20241015", +"revision": "20241021", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -45542,7 +45542,7 @@ false "description": "The CacheKeyPolicy for this CdnPolicy." }, "cacheMode": { -"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", +"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.", "enum": [ "CACHE_ALL_STATIC", "FORCE_CACHE_ALL", @@ -46352,7 +46352,7 @@ false "description": "The CacheKeyPolicy for this CdnPolicy." }, "cacheMode": { -"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. 
CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", +"description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. If no value is provided for cdnPolicy.cacheMode, it defaults to CACHE_ALL_STATIC.", "enum": [ "CACHE_ALL_STATIC", "FORCE_CACHE_ALL", @@ -67317,12 +67317,16 @@ false "enum": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "UNSPECIFIED_NIC_TYPE", "VIRTIO_NET" ], "enumDescriptions": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "No type specified.", "VIRTIO" ], @@ -67621,6 +67625,10 @@ false "description": "[Output Only] Type of the resource. Always compute#networkProfile for network profiles.", "type": "string" }, +"location": { +"$ref": "NetworkProfileLocation", +"description": "[Output Only] Location to which the network is restricted." +}, "name": { "description": "[Output Only] Name of the resource.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -67641,6 +67649,26 @@ false }, "type": "object" }, +"NetworkProfileLocation": { +"id": "NetworkProfileLocation", +"properties": { +"name": { +"type": "string" +}, +"scope": { +"enum": [ +"REGION", +"ZONE" +], +"enumDescriptions": [ +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, "NetworkProfileNetworkFeatures": { "id": "NetworkProfileNetworkFeatures", "properties": { @@ -67881,12 +67909,16 @@ false "enum": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "UNSPECIFIED_NIC_TYPE", "VIRTIO_NET" ], "enumDescriptions": [ "GVNIC", "IDPF", +"IRDMA", +"MRDMA", "No type specified.", "VIRTIO" ], @@ -77632,6 +77664,10 @@ false "description": "[Output Only] An opaque ID of the host on which the VM is running.", "type": "string" }, +"physicalHostTopology": { +"$ref": "ResourceStatusPhysicalHostTopology", +"description": "[Output Only] The physical host topology of the host on which the VM is running." +}, "scheduling": { "$ref": "ResourceStatusScheduling" }, @@ -77641,6 +77677,25 @@ false }, "type": "object" }, +"ResourceStatusPhysicalHostTopology": { +"description": "Represents the physical host topology of the host on which the VM is running.", +"id": "ResourceStatusPhysicalHostTopology", +"properties": { +"block": { +"type": "string" +}, +"cluster": { +"type": "string" +}, +"host": { +"type": "string" +}, +"subblock": { +"type": "string" +} +}, +"type": "object" +}, "ResourceStatusScheduling": { "id": "ResourceStatusScheduling", "properties": { @@ -86353,7 +86408,7 @@ false "type": "string" }, "purpose": { -"description": "The purpose of the resource. 
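ResourceStatusPhysicalHostTopology, added above, exposes the cluster/block/subblock/host placement of a running VM. A sketch reading it from an instance's resourceStatus with the beta client; whether the field is populated depends on the machine, and the resource names are placeholders.

from googleapiclient import discovery

compute = discovery.build("compute", "beta")

instance = compute.instances().get(
    project="my-project", zone="us-central1-a", instance="my-instance"
).execute()

# All four fields are opaque identifiers; comparing them across VMs indicates
# relative physical proximity.
topo = instance.get("resourceStatus", {}).get("physicalHostTopology", {})
print(topo.get("cluster"), topo.get("block"), topo.get("subblock"), topo.get("host"))
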
This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", +"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", "enum": [ "GLOBAL_MANAGED_PROXY", "INTERNAL_HTTPS_LOAD_BALANCER", @@ -91698,7 +91753,7 @@ false "type": "string" }, "purpose": { -"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", +"description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, or PRIVATE_SERVICE_CONNECT. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. 
The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.", "enum": [ "GLOBAL_MANAGED_PROXY", "INTERNAL_HTTPS_LOAD_BALANCER", From a694aa604a1b4237ffcd764df4cad69d8725c7f6 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 06/18] feat(container): update the api #### container:v1 The following keys were added: - schemas.ClusterUpdate.properties.desiredEnterpriseConfig.$ref (Total Keys: 1) - schemas.DesiredEnterpriseConfig (Total Keys: 3) - schemas.EnterpriseConfig.properties.desiredTier.type (Total Keys: 1) --- ...tainer_v1.projects.locations.clusters.html | 6 +++ .../container_v1.projects.zones.clusters.html | 6 +++ .../documents/container.v1.json | 41 ++++++++++++++++++- 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html index d076da29aa..4cfee1d5e5 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.html +++ b/docs/dyn/container_v1.projects.locations.clusters.html @@ -465,6 +465,7 @@

Method Details

"endpoint": "A String", # Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information. "enterpriseConfig": { # EnterpriseConfig is the cluster enterprise configuration. # GKE Enterprise Configuration. "clusterTier": "A String", # Output only. cluster_tier indicates the effective tier of the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. }, "etag": "A String", # This checksum is computed by the server based on the value of cluster fields, and may be sent on update requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. @@ -1555,6 +1556,7 @@

Method Details

"endpoint": "A String", # Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information. "enterpriseConfig": { # EnterpriseConfig is the cluster enterprise configuration. # GKE Enterprise Configuration. "clusterTier": "A String", # Output only. cluster_tier indicates the effective tier of the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. }, "etag": "A String", # This checksum is computed by the server based on the value of cluster fields, and may be sent on update requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. @@ -2548,6 +2550,7 @@

Method Details

"endpoint": "A String", # Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information. "enterpriseConfig": { # EnterpriseConfig is the cluster enterprise configuration. # GKE Enterprise Configuration. "clusterTier": "A String", # Output only. cluster_tier indicates the effective tier of the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. }, "etag": "A String", # This checksum is computed by the server based on the value of cluster fields, and may be sent on update requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. @@ -4389,6 +4392,9 @@

Method Details

"desiredEnableFqdnNetworkPolicy": True or False, # Enable/Disable FQDN Network Policy for the cluster. "desiredEnableMultiNetworking": True or False, # Enable/Disable Multi-Networking for the cluster "desiredEnablePrivateEndpoint": True or False, # Enable/Disable private endpoint for the cluster's master. Deprecated: Use desired_control_plane_endpoints_config.ip_endpoints_config.enable_public_endpoint instead. Note that the value of enable_public_endpoint is reversed: if enable_private_endpoint is false, then enable_public_endpoint will be true. + "desiredEnterpriseConfig": { # DesiredEnterpriseConfig is a wrapper used for updating enterprise_config. # The desired enterprise configuration for the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. + }, "desiredFleet": { # Fleet is the fleet configuration for the cluster. # The desired fleet configuration for the cluster. "membership": "A String", # Output only. The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`. "preRegistered": True or False, # Output only. Whether the cluster has been registered through the fleet API. diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index 0eedc3669a..0b43139f18 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -556,6 +556,7 @@

Method Details

"endpoint": "A String", # Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information. "enterpriseConfig": { # EnterpriseConfig is the cluster enterprise configuration. # GKE Enterprise Configuration. "clusterTier": "A String", # Output only. cluster_tier indicates the effective tier of the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. }, "etag": "A String", # This checksum is computed by the server based on the value of cluster fields, and may be sent on update requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. @@ -1646,6 +1647,7 @@

Method Details

"endpoint": "A String", # Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information. "enterpriseConfig": { # EnterpriseConfig is the cluster enterprise configuration. # GKE Enterprise Configuration. "clusterTier": "A String", # Output only. cluster_tier indicates the effective tier of the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. }, "etag": "A String", # This checksum is computed by the server based on the value of cluster fields, and may be sent on update requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. @@ -2683,6 +2685,7 @@

Method Details

"endpoint": "A String", # Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information. "enterpriseConfig": { # EnterpriseConfig is the cluster enterprise configuration. # GKE Enterprise Configuration. "clusterTier": "A String", # Output only. cluster_tier indicates the effective tier of the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. }, "etag": "A String", # This checksum is computed by the server based on the value of cluster fields, and may be sent on update requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. @@ -4416,6 +4419,9 @@

Method Details

"desiredEnableFqdnNetworkPolicy": True or False, # Enable/Disable FQDN Network Policy for the cluster. "desiredEnableMultiNetworking": True or False, # Enable/Disable Multi-Networking for the cluster "desiredEnablePrivateEndpoint": True or False, # Enable/Disable private endpoint for the cluster's master. Deprecated: Use desired_control_plane_endpoints_config.ip_endpoints_config.enable_public_endpoint instead. Note that the value of enable_public_endpoint is reversed: if enable_private_endpoint is false, then enable_public_endpoint will be true. + "desiredEnterpriseConfig": { # DesiredEnterpriseConfig is a wrapper used for updating enterprise_config. # The desired enterprise configuration for the cluster. + "desiredTier": "A String", # desired_tier specifies the desired tier of the cluster. + }, "desiredFleet": { # Fleet is the fleet configuration for the cluster. # The desired fleet configuration for the cluster. "membership": "A String", # Output only. The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`. "preRegistered": True or False, # Output only. Whether the cluster has been registered through the fleet API. diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json index 6a4506347f..234df7f8ad 100644 --- a/googleapiclient/discovery_cache/documents/container.v1.json +++ b/googleapiclient/discovery_cache/documents/container.v1.json @@ -2540,7 +2540,7 @@ } } }, -"revision": "20241008", +"revision": "20241017", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3649,6 +3649,10 @@ "description": "Enable/Disable private endpoint for the cluster's master. Deprecated: Use desired_control_plane_endpoints_config.ip_endpoints_config.enable_public_endpoint instead. Note that the value of enable_public_endpoint is reversed: if enable_private_endpoint is false, then enable_public_endpoint will be true.", "type": "boolean" }, +"desiredEnterpriseConfig": { +"$ref": "DesiredEnterpriseConfig", +"description": "The desired enterprise configuration for the cluster." +}, "desiredFleet": { "$ref": "Fleet", "description": "The desired fleet configuration for the cluster." @@ -4213,6 +4217,27 @@ }, "type": "object" }, +"DesiredEnterpriseConfig": { +"description": "DesiredEnterpriseConfig is a wrapper used for updating enterprise_config.", +"id": "DesiredEnterpriseConfig", +"properties": { +"desiredTier": { +"description": "desired_tier specifies the desired tier of the cluster.", +"enum": [ +"CLUSTER_TIER_UNSPECIFIED", +"STANDARD", +"ENTERPRISE" +], +"enumDescriptions": [ +"CLUSTER_TIER_UNSPECIFIED is when cluster_tier is not set.", +"STANDARD indicates a standard GKE cluster.", +"ENTERPRISE indicates a GKE Enterprise cluster." +], +"type": "string" +} +}, +"type": "object" +}, "DnsCacheConfig": { "description": "Configuration for NodeLocal DNSCache", "id": "DnsCacheConfig", @@ -4248,6 +4273,20 @@ ], "readOnly": true, "type": "string" +}, +"desiredTier": { +"description": "desired_tier specifies the desired tier of the cluster.", +"enum": [ +"CLUSTER_TIER_UNSPECIFIED", +"STANDARD", +"ENTERPRISE" +], +"enumDescriptions": [ +"CLUSTER_TIER_UNSPECIFIED is when cluster_tier is not set.", +"STANDARD indicates a standard GKE cluster.", +"ENTERPRISE indicates a GKE Enterprise cluster." 
+], +"type": "string" } }, "type": "object" From 3d4eb75730bd4d210b013e44ed0a50a7bb48af66 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 07/18] feat(discoveryengine): update the api #### discoveryengine:v1 The following keys were added: - resources.projects.resources.locations.resources.collections.resources.dataStores.resources.servingConfigs.methods.searchLite (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.engines.resources.servingConfigs.methods.searchLite (Total Keys: 12) - resources.projects.resources.locations.resources.dataStores.resources.servingConfigs.methods.searchLite (Total Keys: 12) - schemas.GoogleApiDistribution (Total Keys: 56) - schemas.GoogleApiMetric (Total Keys: 5) - schemas.GoogleApiMonitoredResource (Total Keys: 11) - schemas.GoogleCloudDiscoveryengineV1AnswerQueryRequest.properties.groundingSpec.$ref (Total Keys: 1) - schemas.GoogleCloudDiscoveryengineV1AnswerQueryRequestGroundingSpec (Total Keys: 3) - schemas.GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries (Total Keys: 3) - schemas.GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries (Total Keys: 4) - schemas.GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse (Total Keys: 7) - schemas.GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries (Total Keys: 4) - schemas.GoogleMonitoringV3Point (Total Keys: 4) - schemas.GoogleMonitoringV3TimeInterval (Total Keys: 6) - schemas.GoogleMonitoringV3TimeSeries (Total Keys: 10) - schemas.GoogleMonitoringV3TypedValue (Total Keys: 9) #### discoveryengine:v1alpha The following keys were added: - resources.projects.resources.locations.resources.collections.resources.dataStores.resources.completionConfig.methods.completeQuery (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.dataStores.resources.servingConfigs.methods.searchLite (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.engines.resources.completionConfig.methods.completeQuery (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.engines.resources.servingConfigs.methods.searchLite (Total Keys: 12) - resources.projects.resources.locations.resources.dataStores.resources.completionConfig.methods.completeQuery (Total Keys: 12) - resources.projects.resources.locations.resources.dataStores.resources.servingConfigs.methods.searchLite (Total Keys: 12) - schemas.GoogleApiMetric (Total Keys: 5) - schemas.GoogleApiMonitoredResourceMetadata (Total Keys: 6) - schemas.GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequest (Total Keys: 19) - schemas.GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponse (Total Keys: 35) - schemas.GoogleCloudDiscoveryengineV1alphaAnswerQueryRequest.properties.groundingSpec.$ref (Total Keys: 1) - schemas.GoogleCloudDiscoveryengineV1alphaAnswerQueryRequestGroundingSpec (Total Keys: 4) - schemas.GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries (Total Keys: 3) - schemas.GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries (Total Keys: 4) - schemas.GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse (Total Keys: 7) - schemas.GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries (Total Keys: 4) - schemas.GoogleMonitoringV3Point (Total Keys: 4) - schemas.GoogleMonitoringV3TimeInterval (Total Keys: 6) - schemas.GoogleMonitoringV3TimeSeries (Total Keys: 10) #### discoveryengine:v1beta The following keys were added: - 
resources.projects.resources.locations.resources.collections.resources.dataStores.resources.completionConfig.methods.completeQuery (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.dataStores.resources.servingConfigs.methods.searchLite (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.engines.resources.completionConfig.methods.completeQuery (Total Keys: 12) - resources.projects.resources.locations.resources.collections.resources.engines.resources.servingConfigs.methods.searchLite (Total Keys: 12) - resources.projects.resources.locations.resources.dataStores.resources.completionConfig.methods.completeQuery (Total Keys: 12) - resources.projects.resources.locations.resources.dataStores.resources.servingConfigs.methods.searchLite (Total Keys: 12) - schemas.GoogleApiDistribution (Total Keys: 56) - schemas.GoogleApiMetric (Total Keys: 5) - schemas.GoogleApiMonitoredResource (Total Keys: 11) - schemas.GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries (Total Keys: 3) - schemas.GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries (Total Keys: 4) - schemas.GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse (Total Keys: 7) - schemas.GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries (Total Keys: 4) - schemas.GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequest (Total Keys: 19) - schemas.GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponse (Total Keys: 35) - schemas.GoogleCloudDiscoveryengineV1betaAnswerQueryRequest.properties.groundingSpec.$ref (Total Keys: 1) - schemas.GoogleCloudDiscoveryengineV1betaAnswerQueryRequestGroundingSpec (Total Keys: 4) - schemas.GoogleMonitoringV3Point (Total Keys: 4) - schemas.GoogleMonitoringV3TimeInterval (Total Keys: 6) - schemas.GoogleMonitoringV3TimeSeries (Total Keys: 10) - schemas.GoogleMonitoringV3TypedValue (Total Keys: 9) --- ...collections.dataStores.servingConfigs.html | 320 ++++++- ...ons.collections.dataStores.userEvents.html | 6 +- ...ns.collections.engines.servingConfigs.html | 320 ++++++- ...s.locations.dataStores.servingConfigs.html | 320 ++++++- ...jects.locations.dataStores.userEvents.html | 6 +- ...gine_v1.projects.locations.userEvents.html | 4 +- ...llections.dataStores.completionConfig.html | 256 +++++ ...ects.locations.collections.dataStores.html | 5 + ...collections.dataStores.servingConfigs.html | 516 +++++++++- ...ons.collections.dataStores.userEvents.html | 6 +- ....collections.engines.completionConfig.html | 256 +++++ ...rojects.locations.collections.engines.html | 5 + ...ns.collections.engines.servingConfigs.html | 516 +++++++++- ...locations.dataStores.completionConfig.html | 256 +++++ ...v1alpha.projects.locations.dataStores.html | 5 + ...s.locations.dataStores.servingConfigs.html | 516 +++++++++- ...jects.locations.dataStores.userEvents.html | 6 +- ...v1alpha.projects.locations.userEvents.html | 4 +- ...llections.dataStores.completionConfig.html | 230 +++++ ...ects.locations.collections.dataStores.html | 5 + ...collections.dataStores.servingConfigs.html | 487 +++++++++- ...ons.collections.dataStores.userEvents.html | 6 +- ....collections.engines.completionConfig.html | 230 +++++ ...rojects.locations.collections.engines.html | 5 + ...ns.collections.engines.servingConfigs.html | 487 +++++++++- ...locations.dataStores.completionConfig.html | 230 +++++ ..._v1beta.projects.locations.dataStores.html | 5 + ...s.locations.dataStores.servingConfigs.html | 487 +++++++++- ...jects.locations.dataStores.userEvents.html | 6 +- 
..._v1beta.projects.locations.userEvents.html | 4 +- .../documents/discoveryengine.v1.json | 537 ++++++++++- .../documents/discoveryengine.v1alpha.json | 662 ++++++++++++- .../documents/discoveryengine.v1beta.json | 889 +++++++++++++++++- 33 files changed, 7548 insertions(+), 45 deletions(-) create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html index 1a587a302b..336c86ca42 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html @@ -86,6 +86,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
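(A minimal sketch of how the new searchLite method could be invoked from google-api-python-client with an API key; the project, data store, and key values below are illustrative placeholders, not taken from this change.)

from googleapiclient.discovery import build

# Build a Discovery Engine v1 client authenticated with an API key only,
# which is the auth mode searchLite is documented to accept.
client = build("discoveryengine", "v1", developerKey="YOUR_API_KEY")

serving_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE_ID/servingConfigs/default_serving_config"
)

# Issue a public-website search with a small request body.
response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .servingConfigs()
    .searchLite(
        servingConfig=serving_config,
        body={"query": "example query", "pageSize": 10},
    )
    .execute()
)
print(response.get("attributionToken"))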

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.

@@ -115,6 +121,9 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -404,7 +413,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -807,6 +816,315 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None) +
Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
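For orientation, a minimal SearchRequest body drawing on the fields documented above might look like the following sketch; all values are illustrative placeholders, not API defaults.

# Hedged sketch of a SearchRequest body; every value below is a placeholder.
search_body = {
    "query": "how did alphabet do in 2023",
    "pageSize": 10,
    "safeSearch": True,
    "queryExpansionSpec": {"condition": "AUTO", "pinUnexpandedResults": False},
    "spellCorrectionSpec": {"mode": "AUTO"},
    "userInfo": {"userAgent": "example-app/1.0"},
    "userLabels": {"team": "search-quality"},
    "userPseudoId": "visitor-123",
}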
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1Chunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1Chunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
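Putting the request and response shapes above together, a hedged sketch of calling searchLite through google-api-python-client is shown below. It assumes API-key authentication via `developerKey` (one option this method is intended for) and placeholder project, data store, and serving config IDs.

from googleapiclient.discovery import build

# searchLite allows API-key authentication; passing developerKey is one way to
# supply it (assumption). All resource IDs below are placeholders.
client = build("discoveryengine", "v1", developerKey="YOUR_API_KEY")

serving_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE_ID/servingConfigs/default_serving_config"
)

response = (
    client.projects().locations().collections().dataStores().servingConfigs()
    .searchLite(servingConfig=serving_config,
                body={"query": "return policy", "pageSize": 5})
    .execute()
)

# Results are ranked documents; the derived fields vary by data store type.
for result in response.get("results", []):
    doc = result.get("document", {})
    print(doc.get("id"), doc.get("derivedStructData", {}).get("title"))
print("estimated matches:", response.get("totalSize"))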
+
+ searchLite_next()
+Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
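Because searchLite_next() follows the library's standard *_next() pagination pattern documented above, walking every page of results could look like the sketch below; the API key and resource IDs are placeholders.

from googleapiclient.discovery import build

client = build("discoveryengine", "v1", developerKey="YOUR_API_KEY")  # placeholder key
svc = client.projects().locations().collections().dataStores().servingConfigs()

serving_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE_ID/servingConfigs/default_serving_config"
)

request = svc.searchLite(servingConfig=serving_config,
                         body={"query": "return policy", "pageSize": 10})
while request is not None:
    response = request.execute()
    for result in response.get("results", []):
        print(result.get("id"))
    # searchLite_next() returns None once there are no further pages.
    request = svc.searchLite_next(previous_request=request,
                                  previous_response=response)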
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.userEvents.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.userEvents.html
index 1a871d680b..bd30644d3d 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.userEvents.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.userEvents.html
@@ -188,7 +188,7 @@ 

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -349,7 +349,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -435,7 +435,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html index 832484c3b6..0cb80556db 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html @@ -86,6 +86,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+ searchLite(servingConfig, body=None, x__xgafv=None)

+Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.

+ searchLite_next()

+Retrieves the next page of results.

search_next()

Retrieves the next page of results.

@@ -115,6 +121,9 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -404,7 +413,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -807,6 +816,315 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None)
+Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
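
A minimal request body for the schema above might set only a few of these fields; the values below are placeholders, and every field other than the raw query is an optional tuning knob:

    # Hypothetical minimal body for the request schema documented above.
    request_body = {
        "query": "noise cancelling headphones",       # Raw search query.
        "pageSize": 10,                               # Documents per page.
        "queryExpansionSpec": {"condition": "AUTO"},  # Expand sparse queries automatically.
        "spellCorrectionSpec": {"mode": "AUTO"},      # Auto-correct obvious misspellings.
        "userPseudoId": "visitor-cookie-123",         # Stable visitor identifier for analytics.
    }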
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1Chunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1Chunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
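A rough usage sketch for the request and response documented above, assuming application-default credentials, placeholder project and data store names, and the `collections.dataStores.servingConfigs` resource chain (adjust the chain to whichever servingConfigs collection you call):

    from googleapiclient.discovery import build

    # Build the Discovery Engine client (assumes application-default credentials).
    service = build("discoveryengine", "v1")

    serving_config = (  # Placeholder resource name; adjust to your data store or engine.
        "projects/my-project/locations/global/collections/default_collection/"
        "dataStores/my-data-store/servingConfigs/default_serving_config"
    )

    response = (
        service.projects()
        .locations()
        .collections()
        .dataStores()
        .servingConfigs()
        .searchLite(servingConfig=serving_config, body={"query": "alphabet 2023"})
        .execute()
    )

    # Field names follow the response schema documented above.
    for result in response.get("results", []):
        print(result.get("id"), result.get("document", {}).get("name"))
    print("corrected query:", response.get("correctedQuery"))
    print("next page token:", response.get("nextPageToken"))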
+
+ searchLite_next()
+  Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+
+
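searchLite_next() follows the library's usual `*_next` pagination pattern; a page loop might look like the sketch below, again with placeholder resource names:

    # Reuses `service` and `serving_config` from the sketch above.
    serving_configs = (
        service.projects().locations().collections().dataStores().servingConfigs()
    )

    request = serving_configs.searchLite(
        servingConfig=serving_config,
        body={"query": "alphabet 2023", "pageSize": 10},
    )
    while request is not None:
        response = request.execute()
        for result in response.get("results", []):
            print(result.get("id"))
        # searchLite_next returns None once the collection has no more pages.
        request = serving_configs.searchLite_next(request, response)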
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html
index f4a296e717..1c750ebd31 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html
@@ -86,6 +86,12 @@ Instance Methods
 search(servingConfig, body=None, x__xgafv=None)
Performs a search.
+ searchLite(servingConfig, body=None, x__xgafv=None)
+Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+ searchLite_next()
+Retrieves the next page of results.
 search_next()
Retrieves the next page of results.
@@ -115,6 +121,9 @@ Method Details
    },
  },
  "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method.
+  "groundingSpec": { # Grounding specification. # Optional. Grounding specification.
+    "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim.
+  },
  "query": { # Defines a user-inputted query. # Required. Current user query.
    "queryId": "A String", # Unique Id for the query.
    "text": "A String", # Plain text.
@@ -404,7 +413,7 @@ Method Details
  ],
  "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search.
  "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened.
-  "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.
+  "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.
  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
  "mediaInfo": { # Media-specific user event information. # Media-specific info.
    "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90.
@@ -807,6 +816,315 @@ Method Details
}
+
+ searchLite(servingConfig, body=None, x__xgafv=None)
+  Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1Chunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1Chunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
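+A minimal usage sketch (illustrative only): the resource names are placeholders, the API key is an assumption allowed by this lite method, and the `dataStores.servingConfigs` resource chain is assumed to match the page this method is documented on.
+
+  from googleapiclient.discovery import build
+
+  # searchLite accepts an API key, so OAuth credentials are not required.
+  client = build("discoveryengine", "v1", developerKey="YOUR_API_KEY")
+
+  serving_config = (
+      "projects/PROJECT/locations/global/"
+      "dataStores/DATA_STORE/servingConfigs/default_serving_config"
+  )
+  serving_configs = client.projects().locations().dataStores().servingConfigs()
+
+  # Issue a single searchLite call and print the matched documents.
+  request = serving_configs.searchLite(
+      servingConfig=serving_config, body={"query": "foo", "pageSize": 10}
+  )
+  response = request.execute()
+  for result in response.get("results", []):
+      print(result["id"], result["document"]["name"])
+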
+ +
+ searchLite_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
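+A pagination sketch (assuming `serving_configs`, `request`, and `response` from the searchLite example above):
+
+  while request is not None:
+      response = request.execute()
+      for result in response.get("results", []):
+          print(result["id"])
+      # searchLite_next returns None once there are no further pages.
+      request = serving_configs.searchLite_next(
+          previous_request=request, previous_response=response
+      )
+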
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.userEvents.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.userEvents.html
index 4514d26677..620338a6b1 100644
--- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.userEvents.html
+++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.userEvents.html
@@ -188,7 +188,7 @@ 

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -349,7 +349,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -435,7 +435,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.userEvents.html b/docs/dyn/discoveryengine_v1.projects.locations.userEvents.html index 12664a7d4b..d3088fc039 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.userEvents.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.userEvents.html @@ -158,7 +158,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -244,7 +244,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html new file mode 100644 index 0000000000..1f2aeade01 --- /dev/null +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html @@ -0,0 +1,256 @@ + + + +

Discovery Engine API . projects . locations . collections . dataStores . completionConfig

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ completeQuery(completionConfig, body=None, x__xgafv=None)

+

Completes the user input with advanced keyword suggestions.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ completeQuery(completionConfig, body=None, x__xgafv=None) +
Completes the user input with advanced keyword suggestions.
+
+Args:
+  completionConfig: string, Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for CompletionService.AdvancedCompleteQuery method.
+  "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition.
+    "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifictions, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost.
+      { # Boost applies to suggestions which match a condition.
+        "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.
+        "condition": "A String", # An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY("en", "fr"))`
+      },
+    ],
+  },
+  "includeTailSuggestions": True or False, # Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.
+  "query": "A String", # Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called "zero prefix" feature, which returns user's recently searched queries given the empty query.
+  "queryModel": "A String", # Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.
+  "suggestionTypes": [ # Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.
+    "A String",
+  ],
+  "userInfo": { # Information of an end user. # Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for CompletionService.AdvancedCompleteQuery method.
+  "contentSuggestions": [ # Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as content.
+      "contentType": "A String", # The type of the content suggestion.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields will be populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "peopleSuggestions": [ # Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as people.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields is populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "personType": "A String", # The type of the person.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "querySuggestions": [ # Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.
+    { # Suggestions as search queries.
+      "completableFieldPaths": [ # The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. This field is only populated for the document-completable model.
+        "A String",
+      ],
+      "dataStore": [ # The name of the dataStore that this suggestion belongs to.
+        "A String",
+      ],
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "recentSearchSuggestions": [ # Results of the matched "recent search" suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions from recent search history.
+      "recentSearchTime": "A String", # The time when this recent rearch happened.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "tailMatchTriggered": True or False, # True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.
+}
+
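+A minimal usage sketch (illustrative only; resource names are placeholders, and credentials are assumed to come from Application Default Credentials):
+
+  from googleapiclient.discovery import build
+
+  client = build("discoveryengine", "v1alpha")
+
+  completion_config = (
+      "projects/PROJECT/locations/global/collections/default_collection/"
+      "dataStores/DATA_STORE/completionConfig"
+  )
+  response = (
+      client.projects()
+      .locations()
+      .collections()
+      .dataStores()
+      .completionConfig()
+      .completeQuery(completionConfig=completion_config, body={"query": "how do i"})
+      .execute()
+  )
+  # With suggestion_types left unspecified, query suggestions are returned.
+  for suggestion in response.get("querySuggestions", []):
+      print(suggestion["suggestion"])
+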
+
+
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html
index a1572eefd8..7447b71354 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the branches Resource.

+

+ completionConfig() +

+

Returns the completionConfig Resource.

+

completionSuggestions()

diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html
index aed8b11a5e..55d407ef75 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html
@@ -98,6 +98,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.

@@ -127,6 +133,10 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "filteringLevel": "A String", # Optional. Specifies whether to enable the filtering based on grounding score and at what level. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -867,7 +877,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -1478,6 +1488,510 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None) +
Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "customFineTuningSpec": { # Defines custom fine tuning spec. # Custom fine tuning configs. If set, it has higher priority than the configs set in ServingConfig.custom_fine_tuning_spec.
+    "enableSearchAdaptor": True or False, # Whether or not to enable and include custom fine tuned search adaptor model.
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "embeddingSpec": { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
+    "embeddingVectors": [ # The embedding vector used for retrieval. Limit to 1.
+      { # Embedding vector.
+        "fieldPath": "A String", # Embedding field path in schema.
+        "vector": [ # Query embedding vector.
+          3.14,
+        ],
+      },
+    ],
+  },
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
+    "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED.
+    "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
+      "A String",
+    ],
+  },
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "personalizationSpec": { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
+    "mode": "A String", # The personalization mode of the search request. Defaults to Mode.AUTO.
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joined by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of a location, such as "US" and "419". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
+  "relevanceThreshold": "A String", # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
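+
+As an illustrative sketch only (the field values below are assumptions, not defaults), a request body that combines a plain query with a content search spec and a facet spec could look like:
+
+  body = {
+      "query": "noise cancelling headphones",
+      "pageSize": 10,
+      "contentSearchSpec": {
+          "snippetSpec": {"returnSnippet": True},
+          "summarySpec": {"summaryResultCount": 5, "includeCitations": True},
+      },
+      "facetSpecs": [
+          {"facetKey": {"key": "category"}, "limit": 20},
+      ],
+      "userInfo": {"userAgent": "example-client/1.0"},
+  }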
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "appliedControls": [ # Controls applied as part of the Control service.
+    "A String",
+  ],
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "geoSearchDebugInfo": [
+    { # Debug information specifically related to forward geocoding issues arising from Geolocation Search.
+      "errorMessage": "A String", # The error produced.
+      "originalAddressQuery": "A String", # The address from which forward geocoding ingestion produced issues.
+    },
+  ],
+  "guidedSearchResult": { # Guided search result. The guided search helps user to refine the search results and narrow down to the real needs from a broaded search results. # Guided search result.
+    "followUpQuestions": [ # Suggested follow-up questions.
+      "A String",
+    ],
+    "refinementAttributes": [ # A list of ranked refinement attributes.
+      { # Useful attribute for search result refinements.
+        "attributeKey": "A String", # Attribute key used to refine the results. For example, `"movie_type"`.
+        "attributeValue": "A String", # Attribute value used to refine the results. For example, `"drama"`.
+      },
+    ],
+  },
+  "naturalLanguageQueryUnderstandingInfo": { # Information describing what natural language understanding was done on the input query. # Natural language query understanding information for the returned results.
+    "extractedFilters": "A String", # The filters that were extracted from the input query.
+    "rewrittenQuery": "A String", # Rewritten input query minus the extracted filters.
+    "structuredExtractedFilter": { # The filters that were extracted from the input query represented in a structured form. # The filters that were extracted from the input query represented in a structured form.
+      "expression": { # The expression denoting the filter that was extracted from the input query. # The expression denoting the filter that was extracted from the input query in a structured form. It can be a simple expression denoting a single string, numerical or geolocation constraint or a compound expression which is a combination of multiple expressions connected using logical (OR and AND) operators.
+        "andExpr": { # Logical `And` operator. # Logical "And" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ANDed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "geolocationConstraint": { # Constraint of a geolocation field. Name of the geolocation field as defined in the schema. # Geolocation constraint expression.
+          "address": "A String", # The reference address that was inferred from the input query. The proximity of the reference address to the geolocation field will be used to filter the results.
+          "fieldName": "A String", # The name of the geolocation field as defined in the schema.
+          "latitude": 3.14, # The latitude of the geolocation inferred from the input query.
+          "longitude": 3.14, # The longitude of the geolocation inferred from the input query.
+          "radiusInMeters": 3.14, # The radius in meters around the address. The record is returned if the location of the geolocation field is within the radius.
+        },
+        "numberConstraint": { # Constraint expression of a number field. Example: price < 100. # Numerical constraint expression.
+          "comparison": "A String", # The comparison operation performed between the field value and the value specified in the constraint.
+          "fieldName": "A String", # Name of the numerical field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "value": 3.14, # The value specified in the numerical constraint.
+        },
+        "orExpr": { # Logical `Or` operator. # Logical "Or" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ORed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "stringConstraint": { # Constraint expression of a string field. # String constraint expression.
+          "fieldName": "A String", # Name of the string field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "values": [ # Values of the string field. The record will only be returned if the field value matches one of the values specified here.
+            "A String",
+          ],
+        },
+      },
+    },
+  },
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "oneBoxResults": [ # A list of One Box results. There can be multiple One Box results of different types.
+    { # OneBoxResult is a holder for all results of a specific type that we want to display differently in the UI.
+      "oneBoxType": "A String", # The type of One Box result.
+      "searchResults": [ # The search results for this One Box.
+        { # Represents the search results.
+          "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+            "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+              "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+              ],
+              "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+              ],
+            },
+            "content": "A String", # Content is a string from a document (parsed content).
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+              "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+                "a_key": "", # Properties of the object.
+              },
+              "title": "A String", # Title of the document.
+              "uri": "A String", # Uri of the document.
+            },
+            "id": "A String", # Unique chunk ID of the current chunk.
+            "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+              "pageEnd": 42, # The end page of the chunk.
+              "pageStart": 42, # The start page of the chunk.
+            },
+            "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+          },
+          "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+            "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+              "readers": [ # Readers of the document.
+                { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+                  "idpWide": True or False, # All users within the Identity Provider.
+                  "principals": [ # List of principals.
+                    { # Principal identifier of a user or a group.
+                      "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                      "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                    },
+                  ],
+                },
+              ],
+            },
+            "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+              "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+              "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+              "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+            },
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+              "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+                { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+                  "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+                  "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                    {
+                      "a_key": "", # Properties of the object. Contains field @type with type URL.
+                    },
+                  ],
+                  "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+                },
+              ],
+              "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+            },
+            "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+            "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "schemaId": "A String", # The identifier of the schema located in the same data store.
+            "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+              "a_key": "", # Properties of the object.
+            },
+          },
+          "id": "A String", # Document.id of the searched Document.
+          "modelScores": { # Google provided available scores.
+            "a_key": { # Double list.
+              "values": [ # Double values.
+                3.14,
+              ],
+            },
+          },
+        },
+      ],
+    },
+  ],
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+      "modelScores": { # Google provided available scores.
+        "a_key": { # Double list.
+          "values": [ # Double values.
+            3.14,
+          ],
+        },
+      },
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
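As a minimal, hypothetical sketch (not part of the generated reference), the searchLite call and the response shape documented above might be exercised from the dynamic Python client roughly as follows; the project, engine, API key, and query values are placeholders:

# Hypothetical usage sketch; the resource path and response fields follow the docs above.
from googleapiclient.discovery import build

# searchLite allows API-key authentication, so no OAuth credentials are attached here.
client = build("discoveryengine", "v1alpha", developerKey="YOUR_API_KEY")
serving_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "engines/ENGINE_ID/servingConfigs/default_serving_config"
)
response = (
    client.projects().locations().collections().engines().servingConfigs()
    .searchLite(servingConfig=serving_config, body={"query": "example query", "pageSize": 10})
    .execute()
)
# Summary text and per-result identifiers, as described in the response schema above.
print(response.get("summary", {}).get("summaryText", ""))
for result in response.get("results", []):
    print(result.get("id"), result.get("document", {}).get("name"))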
+ +
+ searchLite_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
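Correspondingly, a hedged sketch of paging with searchLite_next, reusing the client and serving_config placeholders from the sketch above; the loop follows the previous_request/previous_response contract documented here and stops when None is returned:

# Hypothetical pagination sketch based on the documented searchLite_next contract.
serving_configs = client.projects().locations().collections().engines().servingConfigs()
request = serving_configs.searchLite(servingConfig=serving_config, body={"query": "example query"})
while request is not None:
    response = request.execute()
    for result in response.get("results", []):
        print(result.get("id"))
    # Returns None once there are no more pages.
    request = serving_configs.searchLite_next(previous_request=request, previous_response=response)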
+
+
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.userEvents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.userEvents.html
index 7fa2458558..47653fa046 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.userEvents.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.userEvents.html
@@ -188,7 +188,7 @@ 

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -349,7 +349,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -435,7 +435,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html new file mode 100644 index 0000000000..fe35567e18 --- /dev/null +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html @@ -0,0 +1,256 @@ + + + +

Discovery Engine API . projects . locations . collections . engines . completionConfig

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ completeQuery(completionConfig, body=None, x__xgafv=None)

+

Completes the user input with advanced keyword suggestions.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ completeQuery(completionConfig, body=None, x__xgafv=None) +
Completes the user input with advanced keyword suggestions.
+
+Args:
+  completionConfig: string, Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for CompletionService.AdvancedCompleteQuery method.
+  "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition.
+    "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifictions, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost.
+      { # Boost applies to suggestions which match a condition.
+        "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.
+        "condition": "A String", # An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY("en", "fr"))`
+      },
+    ],
+  },
+  "includeTailSuggestions": True or False, # Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.
+  "query": "A String", # Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called "zero prefix" feature, which returns user's recently searched queries given the empty query.
+  "queryModel": "A String", # Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.
+  "suggestionTypes": [ # Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.
+    "A String",
+  ],
+  "userInfo": { # Information of an end user. # Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for CompletionService.AdvancedCompleteQuery method.
+  "contentSuggestions": [ # Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as content.
+      "contentType": "A String", # The type of the content suggestion.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields will be populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "peopleSuggestions": [ # Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as people.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields is populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "personType": "A String", # The type of the person.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "querySuggestions": [ # Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.
+    { # Suggestions as search queries.
+      "completableFieldPaths": [ # The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. This field is only populated for the document-completable model.
+        "A String",
+      ],
+      "dataStore": [ # The name of the dataStore that this suggestion belongs to.
+        "A String",
+      ],
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "recentSearchSuggestions": [ # Results of the matched "recent search" suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions from recent search history.
+      "recentSearchTime": "A String", # The time when this recent rearch happened.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "tailMatchTriggered": True or False, # True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.
+}
+
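As a rough, hypothetical sketch tying the request and response shapes above together (placeholders throughout; a discoveryengine v1alpha client built as in the earlier searchLite sketch is assumed):

# Hypothetical sketch of calling completeQuery on the engine-level completionConfig resource.
completion_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "engines/ENGINE_ID/completionConfig"
)
body = {
    "query": "clou",                # typeahead input (placeholder)
    "includeTailSuggestions": True,
    # "suggestionTypes": [...],     # optional; see the request schema above
}
resp = (
    client.projects().locations().collections().engines().completionConfig()
    .completeQuery(completionConfig=completion_config, body=body)
    .execute()
)
for s in resp.get("querySuggestions", []):
    print(s.get("suggestion"), s.get("dataStore"))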
+
+
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html
index b43b946c63..17b4730289 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html
@@ -74,6 +74,11 @@

Discovery Engine API . projects . locations . collections . engines

Instance Methods

+

+ completionConfig() +

+

Returns the completionConfig Resource.

+

controls()

diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html
index 1f8ee3a099..6c834eadb7 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html
@@ -98,6 +98,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.

@@ -127,6 +133,10 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "filteringLevel": "A String", # Optional. Specifies whether to enable the filtering based on grounding score and at what level. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -867,7 +877,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -1478,6 +1488,510 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None) +
Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "customFineTuningSpec": { # Defines custom fine tuning spec. # Custom fine tuning configs. If set, it has higher priority than the configs set in ServingConfig.custom_fine_tuning_spec.
+    "enableSearchAdaptor": True or False, # Whether or not to enable and include custom fine tuned search adaptor model.
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "embeddingSpec": { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
+    "embeddingVectors": [ # The embedding vector used for retrieval. Limit to 1.
+      { # Embedding vector.
+        "fieldPath": "A String", # Embedding field path in schema.
+        "vector": [ # Query embedding vector.
+          3.14,
+        ],
+      },
+    ],
+  },
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
+    "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED.
+    "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
+      "A String",
+    ],
+  },
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "personalizationSpec": { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
+    "mode": "A String", # The personalization mode of the search request. Defaults to Mode.AUTO.
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joined by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of a location, such as "US" and "419". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
+  "relevanceThreshold": "A String", # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "appliedControls": [ # Controls applied as part of the Control service.
+    "A String",
+  ],
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "geoSearchDebugInfo": [
+    { # Debug information specifically related to forward geocoding issues arising from Geolocation Search.
+      "errorMessage": "A String", # The error produced.
+      "originalAddressQuery": "A String", # The address from which forward geocoding ingestion produced issues.
+    },
+  ],
+  "guidedSearchResult": { # Guided search result. The guided search helps user to refine the search results and narrow down to the real needs from a broaded search results. # Guided search result.
+    "followUpQuestions": [ # Suggested follow-up questions.
+      "A String",
+    ],
+    "refinementAttributes": [ # A list of ranked refinement attributes.
+      { # Useful attribute for search result refinements.
+        "attributeKey": "A String", # Attribute key used to refine the results. For example, `"movie_type"`.
+        "attributeValue": "A String", # Attribute value used to refine the results. For example, `"drama"`.
+      },
+    ],
+  },
+  "naturalLanguageQueryUnderstandingInfo": { # Information describing what natural language understanding was done on the input query. # Natural language query understanding information for the returned results.
+    "extractedFilters": "A String", # The filters that were extracted from the input query.
+    "rewrittenQuery": "A String", # Rewritten input query minus the extracted filters.
+    "structuredExtractedFilter": { # The filters that were extracted from the input query represented in a structured form. # The filters that were extracted from the input query represented in a structured form.
+      "expression": { # The expression denoting the filter that was extracted from the input query. # The expression denoting the filter that was extracted from the input query in a structured form. It can be a simple expression denoting a single string, numerical or geolocation constraint or a compound expression which is a combination of multiple expressions connected using logical (OR and AND) operators.
+        "andExpr": { # Logical `And` operator. # Logical "And" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ANDed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "geolocationConstraint": { # Constraint of a geolocation field. Name of the geolocation field as defined in the schema. # Geolocation constraint expression.
+          "address": "A String", # The reference address that was inferred from the input query. The proximity of the reference address to the geolocation field will be used to filter the results.
+          "fieldName": "A String", # The name of the geolocation field as defined in the schema.
+          "latitude": 3.14, # The latitude of the geolocation inferred from the input query.
+          "longitude": 3.14, # The longitude of the geolocation inferred from the input query.
+          "radiusInMeters": 3.14, # The radius in meters around the address. The record is returned if the location of the geolocation field is within the radius.
+        },
+        "numberConstraint": { # Constraint expression of a number field. Example: price < 100. # Numerical constraint expression.
+          "comparison": "A String", # The comparison operation performed between the field value and the value specified in the constraint.
+          "fieldName": "A String", # Name of the numerical field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "value": 3.14, # The value specified in the numerical constraint.
+        },
+        "orExpr": { # Logical `Or` operator. # Logical "Or" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ORed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "stringConstraint": { # Constraint expression of a string field. # String constraint expression.
+          "fieldName": "A String", # Name of the string field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "values": [ # Values of the string field. The record will only be returned if the field value matches one of the values specified here.
+            "A String",
+          ],
+        },
+      },
+    },
+  },
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "oneBoxResults": [ # A list of One Box results. There can be multiple One Box results of different types.
+    { # OneBoxResult is a holder for all results of a specific type that we want to display differently in the UI.
+      "oneBoxType": "A String", # The type of One Box result.
+      "searchResults": [ # The search results for this One Box.
+        { # Represents the search results.
+          "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+            "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+              "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+              ],
+              "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+              ],
+            },
+            "content": "A String", # Content is a string from a document (parsed content).
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+              "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+                "a_key": "", # Properties of the object.
+              },
+              "title": "A String", # Title of the document.
+              "uri": "A String", # Uri of the document.
+            },
+            "id": "A String", # Unique chunk ID of the current chunk.
+            "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+              "pageEnd": 42, # The end page of the chunk.
+              "pageStart": 42, # The start page of the chunk.
+            },
+            "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+          },
+          "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+            "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+              "readers": [ # Readers of the document.
+                { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+                  "idpWide": True or False, # All users within the Identity Provider.
+                  "principals": [ # List of principals.
+                    { # Principal identifier of a user or a group.
+                      "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                      "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                    },
+                  ],
+                },
+              ],
+            },
+            "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+              "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+              "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+              "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+            },
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+              "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+                { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+                  "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+                  "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                    {
+                      "a_key": "", # Properties of the object. Contains field @type with type URL.
+                    },
+                  ],
+                  "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+                },
+              ],
+              "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+            },
+            "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+            "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "schemaId": "A String", # The identifier of the schema located in the same data store.
+            "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+              "a_key": "", # Properties of the object.
+            },
+          },
+          "id": "A String", # Document.id of the searched Document.
+          "modelScores": { # Google provided available scores.
+            "a_key": { # Double list.
+              "values": [ # Double values.
+                3.14,
+              ],
+            },
+          },
+        },
+      ],
+    },
+  ],
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+      "modelScores": { # Google provided available scores.
+        "a_key": { # Double list.
+          "values": [ # Double values.
+            3.14,
+          ],
+        },
+      },
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
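For orientation, here is a minimal sketch of calling the method documented above through the google-api-python-client dynamic client. The project, data store, and query values are placeholders, and the resource chain (projects().locations()...servingConfigs()) is an assumption about which servingConfigs page this hunk belongs to; adjust it to the dataStores or engines variant you actually use.

from googleapiclient.discovery import build

# Build the Discovery Engine v1alpha client (uses Application Default Credentials).
client = build("discoveryengine", "v1alpha")

# Placeholder serving config resource name; see the `servingConfig` field above.
serving_config = (
    "projects/my-project/locations/global/collections/default_collection/"
    "dataStores/my-data-store/servingConfigs/default_serving_config"
)

# Minimal request body: only `query` and `pageSize` are set; every other field
# falls back to the defaults described in the request schema above.
body = {"query": "alphabet 2022 revenue", "pageSize": 10}

response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .servingConfigs()
    .searchLite(servingConfig=serving_config, body=body)
    .execute()
)

for result in response.get("results", []):
    print(result.get("id"), result.get("document", {}).get("name"))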
+ searchLite_next()
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
search_next()
Retrieves the next page of results.
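Both pagers follow the standard google-api-python-client pattern: pass the previous request and response back in and stop when the helper returns None. A sketch, reusing the placeholder names from the example above:

serving_configs = (
    client.projects().locations().collections().dataStores().servingConfigs()
)

request = serving_configs.searchLite(servingConfig=serving_config, body=body)
while request is not None:
    response = request.execute()
    for result in response.get("results", []):
        print(result.get("id"))
    # searchLite_next() (and search_next()) return None when there are no more pages.
    request = serving_configs.searchLite_next(request, response)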
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html
new file mode 100644
index 0000000000..5257ebaf3b
--- /dev/null
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html
@@ -0,0 +1,256 @@
+
+
+
+

Discovery Engine API . projects . locations . dataStores . completionConfig

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ completeQuery(completionConfig, body=None, x__xgafv=None)

+

Completes the user input with advanced keyword suggestions.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ completeQuery(completionConfig, body=None, x__xgafv=None) +
Completes the user input with advanced keyword suggestions.
+
+Args:
+  completionConfig: string, Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for the CompletionService.AdvancedCompleteQuery method.
+  "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition.
+    "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only language condition boost is supported.
+      { # Boost applies to suggestions which match a condition.
+        "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.
+        "condition": "A String", # An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY("en", "fr"))`
+      },
+    ],
+  },
+  "includeTailSuggestions": True or False, # Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.
+  "query": "A String", # Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called "zero prefix" feature, which returns user's recently searched queries given the empty query.
+  "queryModel": "A String", # Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.
+  "suggestionTypes": [ # Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.
+    "A String",
+  ],
+  "userInfo": { # Information of an end user. # Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for CompletionService.AdvancedCompleteQuery method.
+  "contentSuggestions": [ # Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as content.
+      "contentType": "A String", # The type of the content suggestion.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields will be populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "peopleSuggestions": [ # Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as people.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields is populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For Google Workspace user account, group_id should be the google workspace group email. For non-google identity provider user account, group_id is the mapped group identifier configured during the workforcepool config.
+                  "userId": "A String", # User identifier. For Google Workspace user account, user_id should be the google workspace user email. For non-google identity provider user account, user_id is the mapped user identifier configured during the workforcepool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "personType": "A String", # The type of the person.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "querySuggestions": [ # Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.
+    { # Suggestions as search queries.
+      "completableFieldPaths": [ # The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. This field is only populated for the document-completable model.
+        "A String",
+      ],
+      "dataStore": [ # The name of the dataStore that this suggestion belongs to.
+        "A String",
+      ],
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "recentSearchSuggestions": [ # Results of the matched "recent search" suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions from recent search history.
+      "recentSearchTime": "A String", # The time when this recent rearch happened.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "tailMatchTriggered": True or False, # True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.
+}
+
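A minimal sketch of calling the new completeQuery() method through googleapiclient.discovery, including the language-condition boost described above. The project, data store, and query values are placeholders, and depending on your setup the service may additionally require credentials:

  from googleapiclient.discovery import build

  client = build("discoveryengine", "v1alpha")
  name = ("projects/my-project/locations/global/collections/default_collection/"
          "dataStores/my-data-store/completionConfig")
  body = {
      "query": "vertex",
      "queryModel": "document",
      # Optional: boost suggestions in a given language, per boostSpec above.
      "boostSpec": {
          "conditionBoostSpecs": [
              {"condition": '(lang_code: ANY("en"))', "boost": 0.5},
          ]
      },
  }
  response = (
      client.projects()
      .locations()
      .dataStores()
      .completionConfig()
      .completeQuery(completionConfig=name, body=body)
      .execute()
  )
  for suggestion in response.get("querySuggestions", []):
      print(suggestion.get("suggestion"))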
+
+
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html
index cd74a1fcd9..7b1940ce6e 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the branches Resource.

+

+ completionConfig() +

+

Returns the completionConfig Resource.

+

completionSuggestions()

diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html
index 0082e9c343..12debc5db8 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html
@@ -98,6 +98,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.
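A minimal sketch of calling searchLite() with API-key authentication, as described above. The API key and resource names are placeholders:

  from googleapiclient.discovery import build

  client = build("discoveryengine", "v1alpha", developerKey="YOUR_API_KEY")
  serving_config = ("projects/my-project/locations/global/collections/default_collection/"
                    "dataStores/my-data-store/servingConfigs/default_serving_config")
  response = (
      client.projects()
      .locations()
      .dataStores()
      .servingConfigs()
      .searchLite(servingConfig=serving_config,
                  body={"query": "pricing", "pageSize": 10})
      .execute()
  )
  print(response.get("totalSize"))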

@@ -127,6 +133,10 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "filteringLevel": "A String", # Optional. Specifies whether to enable the filtering based on grounding score and at what level. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -867,7 +877,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -1478,6 +1488,510 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None) +
Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "customFineTuningSpec": { # Defines custom fine tuning spec. # Custom fine tuning configs. If set, it has higher priority than the configs set in ServingConfig.custom_fine_tuning_spec.
+    "enableSearchAdaptor": True or False, # Whether or not to enable and include custom fine tuned search adaptor model.
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "embeddingSpec": { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
+    "embeddingVectors": [ # The embedding vector used for retrieval. Limit to 1.
+      { # Embedding vector.
+        "fieldPath": "A String", # Embedding field path in schema.
+        "vector": [ # Query embedding vector.
+          3.14,
+        ],
+      },
+    ],
+  },
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
+    "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED.
+    "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
+      "A String",
+    ],
+  },
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "personalizationSpec": { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
+    "mode": "A String", # The personalization mode of the search request. Defaults to Mode.AUTO.
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joined by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of a location, such as "US" and "419". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
+  "relevanceThreshold": "A String", # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "appliedControls": [ # Controls applied as part of the Control service.
+    "A String",
+  ],
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "geoSearchDebugInfo": [
+    { # Debug information specifically related to forward geocoding issues arising from Geolocation Search.
+      "errorMessage": "A String", # The error produced.
+      "originalAddressQuery": "A String", # The address from which forward geocoding ingestion produced issues.
+    },
+  ],
+  "guidedSearchResult": { # Guided search result. The guided search helps user to refine the search results and narrow down to the real needs from a broaded search results. # Guided search result.
+    "followUpQuestions": [ # Suggested follow-up questions.
+      "A String",
+    ],
+    "refinementAttributes": [ # A list of ranked refinement attributes.
+      { # Useful attribute for search result refinements.
+        "attributeKey": "A String", # Attribute key used to refine the results. For example, `"movie_type"`.
+        "attributeValue": "A String", # Attribute value used to refine the results. For example, `"drama"`.
+      },
+    ],
+  },
+  "naturalLanguageQueryUnderstandingInfo": { # Information describing what natural language understanding was done on the input query. # Natural language query understanding information for the returned results.
+    "extractedFilters": "A String", # The filters that were extracted from the input query.
+    "rewrittenQuery": "A String", # Rewritten input query minus the extracted filters.
+    "structuredExtractedFilter": { # The filters that were extracted from the input query represented in a structured form. # The filters that were extracted from the input query represented in a structured form.
+      "expression": { # The expression denoting the filter that was extracted from the input query. # The expression denoting the filter that was extracted from the input query in a structured form. It can be a simple expression denoting a single string, numerical or geolocation constraint or a compound expression which is a combination of multiple expressions connected using logical (OR and AND) operators.
+        "andExpr": { # Logical `And` operator. # Logical "And" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ANDed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "geolocationConstraint": { # Constraint of a geolocation field. Name of the geolocation field as defined in the schema. # Geolocation constraint expression.
+          "address": "A String", # The reference address that was inferred from the input query. The proximity of the reference address to the geolocation field will be used to filter the results.
+          "fieldName": "A String", # The name of the geolocation field as defined in the schema.
+          "latitude": 3.14, # The latitude of the geolocation inferred from the input query.
+          "longitude": 3.14, # The longitude of the geolocation inferred from the input query.
+          "radiusInMeters": 3.14, # The radius in meters around the address. The record is returned if the location of the geolocation field is within the radius.
+        },
+        "numberConstraint": { # Constraint expression of a number field. Example: price < 100. # Numerical constraint expression.
+          "comparison": "A String", # The comparison operation performed between the field value and the value specified in the constraint.
+          "fieldName": "A String", # Name of the numerical field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "value": 3.14, # The value specified in the numerical constraint.
+        },
+        "orExpr": { # Logical `Or` operator. # Logical "Or" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ORed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "stringConstraint": { # Constraint expression of a string field. # String constraint expression.
+          "fieldName": "A String", # Name of the string field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "values": [ # Values of the string field. The record will only be returned if the field value matches one of the values specified here.
+            "A String",
+          ],
+        },
+      },
+    },
+  },
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "oneBoxResults": [ # A list of One Box results. There can be multiple One Box results of different types.
+    { # OneBoxResult is a holder for all results of specific type that we want to display in UI differently.
+      "oneBoxType": "A String", # The type of One Box result.
+      "searchResults": [ # The search results for this One Box.
+        { # Represents the search results.
+          "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+            "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+              "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+              ],
+              "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+              ],
+            },
+            "content": "A String", # Content is a string from a document (parsed content).
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+              "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+                "a_key": "", # Properties of the object.
+              },
+              "title": "A String", # Title of the document.
+              "uri": "A String", # Uri of the document.
+            },
+            "id": "A String", # Unique chunk ID of the current chunk.
+            "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+              "pageEnd": 42, # The end page of the chunk.
+              "pageStart": 42, # The start page of the chunk.
+            },
+            "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+          },
+          "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+            "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+              "readers": [ # Readers of the document.
+                { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+                  "idpWide": True or False, # All users within the Identity Provider.
+                  "principals": [ # List of principals.
+                    { # Principal identifier of a user or a group.
+                      "groupId": "A String", # Group identifier. For a Google Workspace user account, group_id should be the Google Workspace group email. For a non-Google identity provider user account, group_id is the mapped group identifier configured during the workforce pool config.
+                      "userId": "A String", # User identifier. For a Google Workspace user account, user_id should be the Google Workspace user email. For a non-Google identity provider user account, user_id is the mapped user identifier configured during the workforce pool config.
+                    },
+                  ],
+                },
+              ],
+            },
+            "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+              "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+              "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+              "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+            },
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+              "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+                { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+                  "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+                  "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                    {
+                      "a_key": "", # Properties of the object. Contains field @type with type URL.
+                    },
+                  ],
+                  "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+                },
+              ],
+              "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+            },
+            "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+            "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "schemaId": "A String", # The identifier of the schema located in the same data store.
+            "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+              "a_key": "", # Properties of the object.
+            },
+          },
+          "id": "A String", # Document.id of the searched Document.
+          "modelScores": { # Google provided available scores.
+            "a_key": { # Double list.
+              "values": [ # Double values.
+                3.14,
+              ],
+            },
+          },
+        },
+      ],
+    },
+  ],
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "aclInfo": { # ACL Information of the Document. # Access control information for the document.
+          "readers": [ # Readers of the document.
+            { # AclRestriction to model complex inheritance restrictions. Example: Modeling a "Both Permit" inheritance, where to access a child document, user needs to have access to parent document. Document Hierarchy - Space_S --> Page_P. Readers: Space_S: group_1, user_1 Page_P: group_2, group_3, user_2 Space_S ACL Restriction - { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ] } ] } } Page_P ACL Restriction. { "acl_info": { "readers": [ { "principals": [ { "group_id": "group_2" }, { "group_id": "group_3" }, { "user_id": "user_2" } ], }, { "principals": [ { "group_id": "group_1" }, { "user_id": "user_1" } ], } ] } }
+              "idpWide": True or False, # All users within the Identity Provider.
+              "principals": [ # List of principals.
+                { # Principal identifier of a user or a group.
+                  "groupId": "A String", # Group identifier. For a Google Workspace user account, group_id should be the Google Workspace group email. For a non-Google identity provider user account, group_id is the mapped group identifier configured during the workforce pool config.
+                  "userId": "A String", # User identifier. For a Google Workspace user account, user_id should be the Google Workspace user email. For a non-Google identity provider user account, user_id is the mapped user identifier configured during the workforce pool config.
+                },
+              ],
+            },
+          ],
+        },
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+      "modelScores": { # Google provided available scores.
+        "a_key": { # Double list.
+          "values": [ # Double values.
+            3.14,
+          ],
+        },
+      },
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
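For orientation, here is a minimal sketch of calling this method with the dynamic Python client. It is an illustration under stated assumptions, not a verified sample: the project, data store, and serving config names are placeholders, and the exact resource chain (for example `dataStores()` versus `collections().dataStores()`) is inferred from the shape of this page rather than confirmed against the published discovery document.

```python
from googleapiclient.discovery import build

# Sketch only: assumes Application Default Credentials are available and
# that the v1alpha surface exposes searchLite() on the servingConfigs
# resource as documented above. All resource names are placeholders.
client = build("discoveryengine", "v1alpha")

serving_config = (
    "projects/my-project/locations/global/collections/default_collection/"
    "dataStores/my-data-store/servingConfigs/default_serving_config"
)

request = (
    client.projects()
    .locations()
    .dataStores()
    .servingConfigs()
    .searchLite(
        servingConfig=serving_config,
        body={
            "query": "how do I configure autocomplete?",
            "userPseudoId": "visitor-cookie-123",  # stable per-visitor ID
        },
    )
)
response = request.execute()

# Fields are only present when populated, so read them defensively.
for result in response.get("results", []):
    document = result.get("document", {})
    print(result.get("id"), document.get("name"))

print("attribution token:", response.get("attributionToken"))
```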
+
+
+ searchLite_next()
+Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
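`searchLite_next()` follows the client's usual pagination convention of taking the previous request and previous response. A hedged sketch of walking every page, reusing the `client` and `request` objects from the sketch above:

```python
# Iterate until searchLite_next() returns None, which happens when the
# previous response carried no nextPageToken.
serving_configs = (
    client.projects().locations().dataStores().servingConfigs()
)

while request is not None:
    response = request.execute()
    for result in response.get("results", []):
        print(result.get("id"))
    request = serving_configs.searchLite_next(request, response)
```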
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.userEvents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.userEvents.html
index 774181f50d..2549ef141c 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.userEvents.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.userEvents.html
@@ -188,7 +188,7 @@ 

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -349,7 +349,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -435,7 +435,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.userEvents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.userEvents.html index 0b393055b4..222e21a68c 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.userEvents.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.userEvents.html @@ -158,7 +158,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -244,7 +244,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html new file mode 100644 index 0000000000..81c09e65f9 --- /dev/null +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html @@ -0,0 +1,230 @@ + + + +

Discovery Engine API . projects . locations . collections . dataStores . completionConfig

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ completeQuery(completionConfig, body=None, x__xgafv=None)

+

Completes the user input with advanced keyword suggestions.

+

Method Details

+
+ close()
+Close httplib2 connections.
+
+
+
+ completeQuery(completionConfig, body=None, x__xgafv=None)
+Completes the user input with advanced keyword suggestions.
+
+Args:
+  completionConfig: string, Required. The completion_config resource name of the parent dataStore or engine for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` or `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for CompletionService.AdvancedCompleteQuery method.
+  "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition.
+    "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. The maximum number of specifications is 20. Note: Currently, only language condition boosting is supported.
+      { # Boost applies to suggestions which match a condition.
+        "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.
+        "condition": "A String", # An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY("en", "fr"))`
+      },
+    ],
+  },
+  "includeTailSuggestions": True or False, # Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.
+  "query": "A String", # Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called "zero prefix" feature, which returns user's recently searched queries given the empty query.
+  "queryModel": "A String", # Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.
+  "suggestionTypes": [ # Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.
+    "A String",
+  ],
+  "userInfo": { # Information of an end user. # Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for CompletionService.AdvancedCompleteQuery method.
+  "contentSuggestions": [ # Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as content.
+      "contentType": "A String", # The type of the content suggestion.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields will be populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "peopleSuggestions": [ # Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as people.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields is populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "personType": "A String", # The type of the person.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "querySuggestions": [ # Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.
+    { # Suggestions as search queries.
+      "completableFieldPaths": [ # The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. This field is only populated for the document-completable model.
+        "A String",
+      ],
+      "dataStore": [ # The name of the dataStore that this suggestion belongs to.
+        "A String",
+      ],
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "recentSearchSuggestions": [ # Results of the matched "recent search" suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions from recent search history.
+      "recentSearchTime": "A String", # The time when this recent rearch happened.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "tailMatchTriggered": True or False, # True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.
+}
+
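For orientation, here is a minimal sketch of driving the request and response shapes above with the dynamic client. The resource chain, the completeQuery() method name on the completionConfig resource, and all IDs are illustrative assumptions, not part of the generated reference.

from googleapiclient.discovery import build

# Minimal sketch only: the resource chain and the completeQuery() method name
# below are assumptions about how the advanced completion surface is exposed.
client = build("discoveryengine", "v1beta")

completion_config = (
    "projects/my-project/locations/global/collections/default_collection/"
    "dataStores/my-data-store/completionConfig"  # hypothetical resource name
)

body = {
    "query": "how do i",                  # required typeahead input, max 128 chars
    "queryModel": "document",              # use suggestions from imported documents
    "includeTailSuggestions": True,
    "userPseudoId": "visitor-cookie-123",  # placeholder visitor identifier
}

response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .completionConfig()
    .completeQuery(completionConfig=completion_config, body=body)
    .execute()
)

for s in response.get("querySuggestions", []):
    print(s.get("suggestion"))

Each of the querySuggestions, contentSuggestions, peopleSuggestions, and recentSearchSuggestions lists in the response is ordered with the top suggestion first, so iterating them in order preserves ranking.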
+
+ 
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html
index ceb6649675..bea4757bad 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the branches Resource.

+

+ completionConfig() +

+

Returns the completionConfig Resource.

+

completionSuggestions()

diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html
index 122eb8af46..6a672e442b 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html
@@ -98,6 +98,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.
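The new searchLite / searchLite_next pair can be driven like the existing search / search_next pair. Below is a minimal sketch assuming the usual generated-client conventions (API key passed via developerKey, page iteration via the *_next method); the key, project, and data store IDs are placeholders and the "results" response field name is an assumption.

from googleapiclient.discovery import build

# Minimal sketch: searchLite is described above as accepting API-key auth, so the
# client is built with developerKey; replace the placeholders before running.
client = build("discoveryengine", "v1beta", developerKey="YOUR_API_KEY")

serving_configs = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .servingConfigs()
)

serving_config = (
    "projects/my-project/locations/global/collections/default_collection/"
    "dataStores/my-data-store/servingConfigs/default_serving_config"
)

request = serving_configs.searchLite(
    servingConfig=serving_config,
    body={"query": "public website search example", "pageSize": 10},
)
while request is not None:
    response = request.execute()
    for result in response.get("results", []):  # "results" field name assumed
        print(result)
    # Hand the previous request/response back to fetch the next page, following
    # the usual *_next convention of the generated client.
    request = serving_configs.searchLite_next(request, response)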

@@ -127,6 +133,10 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "filteringLevel": "A String", # Optional. Specifies whether to enable the filtering based on grounding score and at what level. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -832,7 +842,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -1401,6 +1411,481 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None) +
Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "embeddingSpec": { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
+    "embeddingVectors": [ # The embedding vector used for retrieval. Limit to 1.
+      { # Embedding vector.
+        "fieldPath": "A String", # Embedding field path in schema.
+        "vector": [ # Query embedding vector.
+          3.14,
+        ],
+      },
+    ],
+  },
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
+    "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED.
+    "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
+      "A String",
+    ],
+  },
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "personalizationSpec": { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
+    "mode": "A String", # The personalization mode of the search request. Defaults to Mode.AUTO.
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joined by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of a location, such as "US" and "419". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
+  "relevanceThreshold": "A String", # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "appliedControls": [ # Controls applied as part of the Control service.
+    "A String",
+  ],
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "geoSearchDebugInfo": [
+    { # Debug information specifically related to forward geocoding issues arising from Geolocation Search.
+      "errorMessage": "A String", # The error produced.
+      "originalAddressQuery": "A String", # The address from which forward geocoding ingestion produced issues.
+    },
+  ],
+  "guidedSearchResult": { # Guided search result. The guided search helps user to refine the search results and narrow down to the real needs from a broaded search results. # Guided search result.
+    "followUpQuestions": [ # Suggested follow-up questions.
+      "A String",
+    ],
+    "refinementAttributes": [ # A list of ranked refinement attributes.
+      { # Useful attribute for search result refinements.
+        "attributeKey": "A String", # Attribute key used to refine the results. For example, `"movie_type"`.
+        "attributeValue": "A String", # Attribute value used to refine the results. For example, `"drama"`.
+      },
+    ],
+  },
+  "naturalLanguageQueryUnderstandingInfo": { # Information describing what natural language understanding was done on the input query. # Natural language query understanding information for the returned results.
+    "extractedFilters": "A String", # The filters that were extracted from the input query.
+    "rewrittenQuery": "A String", # Rewritten input query minus the extracted filters.
+    "structuredExtractedFilter": { # The filters that were extracted from the input query represented in a structured form. # The filters that were extracted from the input query represented in a structured form.
+      "expression": { # The expression denoting the filter that was extracted from the input query. # The expression denoting the filter that was extracted from the input query in a structured form. It can be a simple expression denoting a single string, numerical or geolocation constraint or a compound expression which is a combination of multiple expressions connected using logical (OR and AND) operators.
+        "andExpr": { # Logical `And` operator. # Logical "And" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ANDed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "geolocationConstraint": { # Constraint of a geolocation field. Name of the geolocation field as defined in the schema. # Geolocation constraint expression.
+          "address": "A String", # The reference address that was inferred from the input query. The proximity of the reference address to the geolocation field will be used to filter the results.
+          "fieldName": "A String", # The name of the geolocation field as defined in the schema.
+          "latitude": 3.14, # The latitude of the geolocation inferred from the input query.
+          "longitude": 3.14, # The longitude of the geolocation inferred from the input query.
+          "radiusInMeters": 3.14, # The radius in meters around the address. The record is returned if the location of the geolocation field is within the radius.
+        },
+        "numberConstraint": { # Constraint expression of a number field. Example: price < 100. # Numerical constraint expression.
+          "comparison": "A String", # The comparison operation performed between the field value and the value specified in the constraint.
+          "fieldName": "A String", # Name of the numerical field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "value": 3.14, # The value specified in the numerical constraint.
+        },
+        "orExpr": { # Logical `Or` operator. # Logical "Or" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ORed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "stringConstraint": { # Constraint expression of a string field. # String constraint expression.
+          "fieldName": "A String", # Name of the string field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "values": [ # Values of the string field. The record will only be returned if the field value matches one of the values specified here.
+            "A String",
+          ],
+        },
+      },
+    },
+  },
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "oneBoxResults": [ # A list of One Box results. There can be multiple One Box results of different types.
+    { # OneBoxResult is a holder for all results of specific type that we want to display in UI differently.
+      "oneBoxType": "A String", # The type of One Box result.
+      "searchResults": [ # The search results for this One Box.
+        { # Represents the search results.
+          "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+            "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+              "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+              ],
+              "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+              ],
+            },
+            "content": "A String", # Content is a string from a document (parsed content).
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+              "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+                "a_key": "", # Properties of the object.
+              },
+              "title": "A String", # Title of the document.
+              "uri": "A String", # Uri of the document.
+            },
+            "id": "A String", # Unique chunk ID of the current chunk.
+            "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+              "pageEnd": 42, # The end page of the chunk.
+              "pageStart": 42, # The start page of the chunk.
+            },
+            "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+          },
+          "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+            "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+              "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+              "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+              "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+            },
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+              "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+                { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+                  "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+                  "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                    {
+                      "a_key": "", # Properties of the object. Contains field @type with type URL.
+                    },
+                  ],
+                  "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+                },
+              ],
+              "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+            },
+            "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+            "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "schemaId": "A String", # The identifier of the schema located in the same data store.
+            "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+              "a_key": "", # Properties of the object.
+            },
+          },
+          "id": "A String", # Document.id of the searched Document.
+          "modelScores": { # Google provided available scores.
+            "a_key": { # Double list.
+              "values": [ # Double values.
+                3.14,
+              ],
+            },
+          },
+        },
+      ],
+    },
+  ],
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+      "modelScores": { # Google provided available scores.
+        "a_key": { # Double list.
+          "values": [ # Double values.
+            3.14,
+          ],
+        },
+      },
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
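As a quick orientation to the response shape documented above, here is a minimal sketch of reading a few of the documented fields. It assumes `response` is the dict returned by calling `execute()` on a search/searchLite request; only field names taken from the schema above are used.

    def print_search_response(response: dict) -> None:
        # Each result carries the matched Document snippet.
        for result in response.get("results", []):
            document = result.get("document", {})
            print(result.get("id"), document.get("name"))

        # Facet buckets requested via SearchRequest.FacetSpec.
        for facet in response.get("facets", []):
            for value in facet.get("values", []):
                print(facet.get("key"), value.get("value"), value.get("count"))

        # Summary is only present when ContentSearchSpec.summary_spec was set.
        summary = response.get("summary", {})
        if summary:
            print(summary.get("summaryText"))

        # nextPageToken is omitted when there are no further pages.
        print(response.get("nextPageToken"))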
+ +
+ searchLite_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
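The searchLite / searchLite_next pair follows the usual google-api-python-client pagination pattern. A minimal sketch, assuming a data-store-level serving config and API key authentication (the resource names, query, and key below are placeholders; the engine-level serving configs documented later in this patch work the same way):

    from googleapiclient.discovery import build

    # searchLite allows API key authentication; the key value here is a placeholder.
    client = build("discoveryengine", "v1beta", developerKey="YOUR_API_KEY")
    serving_configs = (
        client.projects().locations().collections().dataStores().servingConfigs()
    )

    serving_config = (
        "projects/PROJECT/locations/global/collections/default_collection/"
        "dataStores/DATA_STORE/servingConfigs/default_serving_config"
    )
    request = serving_configs.searchLite(
        servingConfig=serving_config,
        body={"query": "jeans", "pageSize": 10},  # assumed SearchRequest fields
    )
    while request is not None:
        response = request.execute()
        for result in response.get("results", []):
            print(result.get("id"))
        # searchLite_next returns None once there are no more pages.
        request = serving_configs.searchLite_next(request, response)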
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.userEvents.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.userEvents.html
index c54c864185..f454b34fc4 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.userEvents.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.userEvents.html
@@ -188,7 +188,7 @@ 

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -349,7 +349,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -435,7 +435,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html new file mode 100644 index 0000000000..9497badf53 --- /dev/null +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html @@ -0,0 +1,230 @@ + + + +

Discovery Engine API . projects . locations . collections . engines . completionConfig

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ completeQuery(completionConfig, body=None, x__xgafv=None)

+

Completes the user input with advanced keyword suggestions.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ completeQuery(completionConfig, body=None, x__xgafv=None) +
Completes the user input with advanced keyword suggestions.
+
+Args:
+  completionConfig: string, Required. The completion_config of the parent dataStore or engine resource for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` or `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for CompletionService.AdvancedCompleteQuery method.
+  "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition.
+    "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifictions, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost.
+      { # Boost applies to suggestions which match a condition.
+        "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.
+        "condition": "A String", # An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY("en", "fr"))`
+      },
+    ],
+  },
+  "includeTailSuggestions": True or False, # Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.
+  "query": "A String", # Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called "zero prefix" feature, which returns user's recently searched queries given the empty query.
+  "queryModel": "A String", # Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.
+  "suggestionTypes": [ # Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.
+    "A String",
+  ],
+  "userInfo": { # Information of an end user. # Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for CompletionService.AdvancedCompleteQuery method.
+  "contentSuggestions": [ # Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as content.
+      "contentType": "A String", # The type of the content suggestion.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields will be populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "peopleSuggestions": [ # Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as people.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields is populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "personType": "A String", # The type of the person.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "querySuggestions": [ # Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.
+    { # Suggestions as search queries.
+      "completableFieldPaths": [ # The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. This field is only populated for the document-completable model.
+        "A String",
+      ],
+      "dataStore": [ # The name of the dataStore that this suggestion belongs to.
+        "A String",
+      ],
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "recentSearchSuggestions": [ # Results of the matched "recent search" suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions from recent search history.
+      "recentSearchTime": "A String", # The time when this recent rearch happened.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "tailMatchTriggered": True or False, # True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.
+}
+
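A minimal sketch of calling the new engine-level completeQuery method documented above (resource names and the typeahead input are placeholders; application default credentials are assumed for authentication):

    from googleapiclient.discovery import build

    client = build("discoveryengine", "v1beta")
    completion_config = (
        "projects/PROJECT/locations/global/collections/default_collection/"
        "engines/ENGINE/completionConfig"
    )
    body = {
        "query": "jea",                 # the typeahead input
        "queryModel": "document",       # see the queryModel values above
        "includeTailSuggestions": True,
    }
    response = (
        client.projects()
        .locations()
        .collections()
        .engines()
        .completionConfig()
        .completeQuery(completionConfig=completion_config, body=body)
        .execute()
    )
    for suggestion in response.get("querySuggestions", []):
        print(suggestion.get("suggestion"), suggestion.get("dataStore"))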
+
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html
index 419370449b..1341804252 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html
@@ -74,6 +74,11 @@

Discovery Engine API . projects . locations . collections . engines

Instance Methods

+

+ completionConfig() +

+

Returns the completionConfig Resource.

+

controls()

diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
index 385e018b56..4c710baa46 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
@@ -98,6 +98,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key authentication, so OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores or engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead, with the required OAuth and IAM checks, for better data security.

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.

@@ -127,6 +133,10 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "filteringLevel": "A String", # Optional. Specifies whether to enable the filtering based on grounding score and at what level. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -832,7 +842,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -1401,6 +1411,481 @@

Method Details

}
+
+  searchLite(servingConfig, body=None, x__xgafv=None)
+Performs a search. Similar to the SearchService.Search method, but a lite version that allows an API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead, with the required OAuth and IAM checks, to provide better data security.
+
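+A minimal sketch of calling this method from Python with google-api-python-client, assuming the `discoveryengine` `v1beta` service exposes `searchLite` on the `servingConfigs` resource (as the serving config path below suggests); the project ID, engine ID, API key, and query are placeholders, not values from this reference:
+
+  from googleapiclient.discovery import build
+
+  # Build an API-key-only client; searchLite is documented as not requiring OAuth or IAM checks.
+  client = build("discoveryengine", "v1beta", developerKey="YOUR_API_KEY")
+
+  serving_config = (
+      "projects/PROJECT_ID/locations/global/collections/default_collection/"
+      "engines/ENGINE_ID/servingConfigs/default_serving_config"
+  )
+
+  # A minimal request body; any field documented under `body` below can be added here.
+  body = {"query": "alphabet 2022 revenue", "pageSize": 10}
+
+  response = (
+      client.projects()
+      .locations()
+      .collections()
+      .engines()
+      .servingConfigs()
+      .searchLite(servingConfig=serving_config, body=body)
+      .execute()
+  )
+
+  # Each result carries a Document (or Chunk) snippet as described in the response schema.
+  for result in response.get("results", []):
+      print(result.get("id"), result.get("document", {}).get("name"))
+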
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "embeddingSpec": { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
+    "embeddingVectors": [ # The embedding vector used for retrieval. Limit to 1.
+      { # Embedding vector.
+        "fieldPath": "A String", # Embedding field path in schema.
+        "vector": [ # Query embedding vector.
+          3.14,
+        ],
+      },
+    ],
+  },
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
+    "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED.
+    "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
+      "A String",
+    ],
+  },
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "personalizationSpec": { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
+    "mode": "A String", # The personalization mode of the search request. Defaults to Mode.AUTO.
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joined by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of a location, such as "US" and "419". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
+  "relevanceThreshold": "A String", # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
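+The request's `pageToken` pairs with the response's `nextPageToken` (described in the response schema below) for pagination. A small hypothetical helper, assuming a `servingConfigs` resource object like the one built in the sketch above:
+
+  def iter_search_lite_results(serving_configs, serving_config, body):
+      """Yield every matched result, following nextPageToken until it is absent."""
+      page_token = None
+      while True:
+          # All other parameters must stay identical across pages, so only pageToken changes.
+          request_body = dict(body)
+          if page_token:
+              request_body["pageToken"] = page_token
+          response = serving_configs.searchLite(
+              servingConfig=serving_config, body=request_body
+          ).execute()
+          for result in response.get("results", []):
+              yield result
+          page_token = response.get("nextPageToken")
+          if not page_token:
+              break
+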
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "appliedControls": [ # Controls applied as part of the Control service.
+    "A String",
+  ],
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "geoSearchDebugInfo": [
+    { # Debug information specifically related to forward geocoding issues arising from Geolocation Search.
+      "errorMessage": "A String", # The error produced.
+      "originalAddressQuery": "A String", # The address from which forward geocoding ingestion produced issues.
+    },
+  ],
+  "guidedSearchResult": { # Guided search result. The guided search helps user to refine the search results and narrow down to the real needs from a broaded search results. # Guided search result.
+    "followUpQuestions": [ # Suggested follow-up questions.
+      "A String",
+    ],
+    "refinementAttributes": [ # A list of ranked refinement attributes.
+      { # Useful attribute for search result refinements.
+        "attributeKey": "A String", # Attribute key used to refine the results. For example, `"movie_type"`.
+        "attributeValue": "A String", # Attribute value used to refine the results. For example, `"drama"`.
+      },
+    ],
+  },
+  "naturalLanguageQueryUnderstandingInfo": { # Information describing what natural language understanding was done on the input query. # Natural language query understanding information for the returned results.
+    "extractedFilters": "A String", # The filters that were extracted from the input query.
+    "rewrittenQuery": "A String", # Rewritten input query minus the extracted filters.
+    "structuredExtractedFilter": { # The filters that were extracted from the input query represented in a structured form. # The filters that were extracted from the input query represented in a structured form.
+      "expression": { # The expression denoting the filter that was extracted from the input query. # The expression denoting the filter that was extracted from the input query in a structured form. It can be a simple expression denoting a single string, numerical or geolocation constraint or a compound expression which is a combination of multiple expressions connected using logical (OR and AND) operators.
+        "andExpr": { # Logical `And` operator. # Logical "And" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ANDed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "geolocationConstraint": { # Constraint of a geolocation field. Name of the geolocation field as defined in the schema. # Geolocation constraint expression.
+          "address": "A String", # The reference address that was inferred from the input query. The proximity of the reference address to the geolocation field will be used to filter the results.
+          "fieldName": "A String", # The name of the geolocation field as defined in the schema.
+          "latitude": 3.14, # The latitude of the geolocation inferred from the input query.
+          "longitude": 3.14, # The longitude of the geolocation inferred from the input query.
+          "radiusInMeters": 3.14, # The radius in meters around the address. The record is returned if the location of the geolocation field is within the radius.
+        },
+        "numberConstraint": { # Constraint expression of a number field. Example: price < 100. # Numerical constraint expression.
+          "comparison": "A String", # The comparison operation performed between the field value and the value specified in the constraint.
+          "fieldName": "A String", # Name of the numerical field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "value": 3.14, # The value specified in the numerical constraint.
+        },
+        "orExpr": { # Logical `Or` operator. # Logical "Or" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ORed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "stringConstraint": { # Constraint expression of a string field. # String constraint expression.
+          "fieldName": "A String", # Name of the string field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "values": [ # Values of the string field. The record will only be returned if the field value matches one of the values specified here.
+            "A String",
+          ],
+        },
+      },
+    },
+  },
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "oneBoxResults": [ # A list of One Box results. There can be multiple One Box results of different types.
+    { # OneBoxResult is a holder for all results of specific type that we want to display in UI differently.
+      "oneBoxType": "A String", # The type of One Box result.
+      "searchResults": [ # The search results for this One Box.
+        { # Represents the search results.
+          "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+            "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+              "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+              ],
+              "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+              ],
+            },
+            "content": "A String", # Content is a string from a document (parsed content).
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+              "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+                "a_key": "", # Properties of the object.
+              },
+              "title": "A String", # Title of the document.
+              "uri": "A String", # Uri of the document.
+            },
+            "id": "A String", # Unique chunk ID of the current chunk.
+            "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+              "pageEnd": 42, # The end page of the chunk.
+              "pageStart": 42, # The start page of the chunk.
+            },
+            "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+          },
+          "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+            "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+              "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+              "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+              "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+            },
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+              "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+                { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+                  "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+                  "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                    {
+                      "a_key": "", # Properties of the object. Contains field @type with type URL.
+                    },
+                  ],
+                  "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+                },
+              ],
+              "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+            },
+            "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+            "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "schemaId": "A String", # The identifier of the schema located in the same data store.
+            "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+              "a_key": "", # Properties of the object.
+            },
+          },
+          "id": "A String", # Document.id of the searched Document.
+          "modelScores": { # Google provided available scores.
+            "a_key": { # Double list.
+              "values": [ # Double values.
+                3.14,
+              ],
+            },
+          },
+        },
+      ],
+    },
+  ],
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+      "modelScores": { # Google provided available scores.
+        "a_key": { # Double list.
+          "values": [ # Double values.
+            3.14,
+          ],
+        },
+      },
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
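The object above is returned to Python callers as a plain dict. A minimal, illustrative sketch of reading the summary, the per-result document data, and the session info out of such a response; the `response` variable and the top-level `results` key are assumptions based on the shape documented above, not part of the generated client:

    # Illustrative only: `response` is assumed to hold the dict returned by a
    # searchLite(...).execute() call, shaped like the object documented above.
    summary_text = response.get("summary", {}).get("summaryText", "")
    for result in response.get("results", []):  # top-level key assumed to be `results`
        document = result.get("document", {})
        print(result.get("id"), document.get("name"))

    # When SearchRequest.session is set, sessionInfo carries the session name and
    # query ID that a later Answer API call can reference.
    session_info = response.get("sessionInfo", {})
    print(session_info.get("name"), session_info.get("queryId"))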
+ +
+ searchLite_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
search_next()
Retrieves the next page of results.
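As with the other `_next` helpers in these generated docs, paging works by handing the previous request and response back to searchLite_next until it returns None. A minimal sketch, assuming `serving_configs` is the servingConfigs resource object and `serving_config` and `body` are a resource name and request dict as documented for searchLite (all three names are illustrative):

    # Illustrative pagination loop; `serving_configs`, `serving_config`, and `body`
    # are assumed to already exist (see the searchLite documentation for the shapes).
    request = serving_configs.searchLite(servingConfig=serving_config, body=body)
    while request is not None:
        response = request.execute()
        for result in response.get("results", []):
            print(result.get("id"))
        # Per the docstring above, searchLite_next returns None when no pages remain.
        request = serving_configs.searchLite_next(request, response)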
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html
new file mode 100644
index 0000000000..69bb83a505
--- /dev/null
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html
@@ -0,0 +1,230 @@
+
+
+
+

Discovery Engine API . projects . locations . dataStores . completionConfig

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ completeQuery(completionConfig, body=None, x__xgafv=None)

+

Completes the user input with advanced keyword suggestions.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ completeQuery(completionConfig, body=None, x__xgafv=None) +
Completes the user input with advanced keyword suggestions.
+
+Args:
+  completionConfig: string, Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` or `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for CompletionService.AdvancedCompleteQuery method.
+  "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition.
+    "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifictions, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost.
+      { # Boost applies to suggestions which match a condition.
+        "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.
+        "condition": "A String", # An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY("en", "fr"))`
+      },
+    ],
+  },
+  "includeTailSuggestions": True or False, # Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.
+  "query": "A String", # Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called "zero prefix" feature, which returns user's recently searched queries given the empty query.
+  "queryModel": "A String", # Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.
+  "suggestionTypes": [ # Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.
+    "A String",
+  ],
+  "userInfo": { # Information of an end user. # Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for CompletionService.AdvancedCompleteQuery method.
+  "contentSuggestions": [ # Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as content.
+      "contentType": "A String", # The type of the content suggestion.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields will be populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "peopleSuggestions": [ # Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions as people.
+      "dataStore": "A String", # The name of the dataStore that this suggestion belongs to.
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the suggestion. Only a subset of fields is populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "personType": "A String", # The type of the person.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "querySuggestions": [ # Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.
+    { # Suggestions as search queries.
+      "completableFieldPaths": [ # The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. This field is only populated for the document-completable model.
+        "A String",
+      ],
+      "dataStore": [ # The name of the dataStore that this suggestion belongs to.
+        "A String",
+      ],
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "recentSearchSuggestions": [ # Results of the matched "recent search" suggestions. The result list is ordered and the first result is the top suggestion.
+    { # Suggestions from recent search history.
+      "recentSearchTime": "A String", # The time when this recent rearch happened.
+      "suggestion": "A String", # The suggestion for the query.
+    },
+  ],
+  "tailMatchTriggered": True or False, # True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.
+}
+
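A minimal sketch of calling the new completeQuery method through the generated Python client. The service name and resource path follow the pattern of this page, but the project, location, and data store IDs are placeholders, and Application Default Credentials are assumed to be configured:

    from googleapiclient.discovery import build

    # Placeholders; substitute real project, location, and data store IDs.
    completion_config = ("projects/my-project/locations/global/collections/default_collection"
                         "/dataStores/my-data-store/completionConfig")

    client = build("discoveryengine", "v1beta")
    response = (
        client.projects()
        .locations()
        .dataStores()
        .completionConfig()
        .completeQuery(completionConfig=completion_config, body={"query": "how do i"})
        .execute()
    )
    # querySuggestions is one of the documented result lists; others may be present.
    for suggestion in response.get("querySuggestions", []):
        print(suggestion.get("suggestion"))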
+
+
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html
index d071c52fe3..f3381073cb 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the branches Resource.

+

+ completionConfig() +

+

Returns the completionConfig Resource.

+

completionSuggestions()

diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html
index 2d71b4412b..86cd1501a5 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html
@@ -98,6 +98,12 @@

Instance Methods

search(servingConfig, body=None, x__xgafv=None)

Performs a search.

+

+ searchLite(servingConfig, body=None, x__xgafv=None)

+

Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.

+

+ searchLite_next()

+

Retrieves the next page of results.

search_next()

Retrieves the next page of results.

@@ -127,6 +133,10 @@

Method Details

}, }, "asynchronousMode": True or False, # Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method. + "groundingSpec": { # Grounding specification. # Optional. Grounding specification. + "filteringLevel": "A String", # Optional. Specifies whether to enable the filtering based on grounding score and at what level. + "includeGroundingSupports": True or False, # Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim. + }, "query": { # Defines a user inputed query. # Required. Current user query. "queryId": "A String", # Unique Id for the query. "text": "A String", # Plain text. @@ -832,7 +842,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -1401,6 +1411,481 @@

Method Details

}
+
+ searchLite(servingConfig, body=None, x__xgafv=None) +
Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.
+
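Because this method accepts an API key instead of OAuth, a hedged sketch of a call through the generated client could look like the following. The API key, serving config path, and query are placeholders; as the description above notes, SearchService.Search with OAuth and IAM remains the recommended path for anything beyond public website search:

    from googleapiclient.discovery import build

    # Placeholders; substitute a real API key and serving config resource name.
    client = build("discoveryengine", "v1beta", developerKey="YOUR_API_KEY")
    serving_config = ("projects/my-project/locations/global/collections/default_collection"
                      "/dataStores/my-data-store/servingConfigs/default_serving_config")

    response = (
        client.projects()
        .locations()
        .dataStores()
        .servingConfigs()
        .searchLite(servingConfig=serving_config, body={"query": "example query", "pageSize": 10})
        .execute()
    )
    for result in response.get("results", []):
        print(result.get("id"))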
+Args:
+  servingConfig: string, Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for SearchService.Search method.
+  "boostSpec": { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
+    "conditionBoostSpecs": [ # Condition boost specifications. If a document matches multiple conditions in the specifictions, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
+      { # Boost applies to documents which match a condition.
+        "boost": 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
+        "boostControlSpec": { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
+          "attributeType": "A String", # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
+          "controlPoints": [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
+            { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
+              "attributeValue": "A String", # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
+              "boostAmount": 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
+            },
+          ],
+          "fieldName": "A String", # The name of the field whose value will be used to determine the boost amount.
+          "interpolationType": "A String", # The interpolation type to be applied to connect the control points listed below.
+        },
+        "condition": "A String", # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID "doc_1" or "doc_2", and color "Red" or "Blue": `(document_id: ANY("doc_1", "doc_2")) AND (color: ANY("Red", "Blue"))`
+      },
+    ],
+  },
+  "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
+  "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
+  "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+    "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+      "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+      "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+    },
+    "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
+      "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
+      "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
+      "numNextSegments": 42, # Return at most `num_next_segments` segments after each selected segments.
+      "numPreviousSegments": 42, # Specifies whether to also include the adjacent from each selected segments. Return at most `num_previous_segments` segments before each selected segments.
+      "returnExtractiveSegmentScore": True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
+    },
+    "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
+    "snippetSpec": { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
+      "maxSnippetCount": 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count > 0.
+      "referenceOnly": True or False, # [DEPRECATED] This field is deprecated and will have no affect on the snippet.
+      "returnSnippet": True or False, # If `true`, then return snippet. If no snippet can be generated, we return "No snippet is available for this page." A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
+    },
+    "summarySpec": { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
+      "ignoreAdversarialQuery": True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
+      "ignoreJailBreakingQuery": True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. "Reply in the tone of a competing company's CEO". If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
+      "ignoreLowRelevantContent": True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
+      "ignoreNonSummarySeekingQuery": True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
+      "includeCitations": True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud's fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
+      "languageCode": "A String", # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
+      "modelPromptSpec": { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
+        "preamble": "A String", # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
+      },
+      "modelSpec": { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
+        "version": "A String", # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
+      },
+      "summaryResultCount": 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+      "useSemanticChunks": True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
+    },
+  },
+  "dataStoreSpecs": [ # Specs defining dataStores to filter on in a search call and configurations for those dataStores. This is only considered for engines with multiple dataStores use case. For single dataStore within an engine, they should use the specs at the top level.
+    { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
+      "dataStore": "A String", # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`.
+      "filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+    },
+  ],
+  "embeddingSpec": { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
+    "embeddingVectors": [ # The embedding vector used for retrieval. Limit to 1.
+      { # Embedding vector.
+        "fieldPath": "A String", # Embedding field path in schema.
+        "vector": [ # Query embedding vector.
+          3.14,
+        ],
+      },
+    ],
+  },
+  "facetSpecs": [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+    { # A facet specification to perform faceted search.
+      "enableDynamicPosition": True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enable and all dynamic facets. For example, you may always want to have rating facet returned in the response, but it's not necessarily to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of rating facet in response is determined automatically. Another example, assuming you have the following facets in the request: * "rating", enable_dynamic_position = true * "price", enable_dynamic_position = false * "brands", enable_dynamic_position = false And also you have a dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be ("price", "brands", "rating", "gender") or ("price", "brands", "gender", "rating") depends on how API orders "gender" and "rating" facets. However, notice that "price" and "brands" are always ranked at first and second position because their enable_dynamic_position is false.
+      "excludedFilterKeys": [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet "Red" and 200 documents with the color facet "Blue". A query containing the filter "color:ANY("Red")" and having "color" as FacetKey.key would by default return only "Red" documents in the search results, and also return "Red" with count 100 as the only color facet. Although there are also blue documents available, "Blue" would not be shown as an available facet value. If "color" is listed in "excludedFilterKeys", then the query returns the facet values "Red" with count 100 and "Blue" with count 200, because the "color" key is now excluded from the filter. Because this field doesn't affect search results, the search results are still correctly filtered to return only "Red" documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
+        "A String",
+      ],
+      "facetKey": { # Specifies how a facet is computed. # Required. The facet key specification.
+        "caseInsensitive": True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
+        "contains": [ # Only get facet values that contain the given strings. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "contains" to "2022", the "category" facet only contains "Action > 2022" and "Sci-Fi > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "intervals": [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
+          { # A floating point interval.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+        ],
+        "key": "A String", # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
+        "orderBy": "A String", # The order in which documents are returned. Allowed values are: * "count desc", which means order by SearchResponse.Facet.values.count descending. * "value desc", which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
+        "prefixes": [ # Only get facet values that start with the given string prefix. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "prefixes" to "Action", the "category" facet only contains "Action > 2022" and "Action > 2021". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+        "restrictedValues": [ # Only get facet for the given restricted values. Only supported on textual fields. For example, suppose "category" has three values "Action > 2022", "Action > 2021" and "Sci-Fi > 2022". If set "restricted_values" to "Action > 2022", the "category" facet only contains "Action > 2022". Only supported on textual fields. Maximum is 10.
+          "A String",
+        ],
+      },
+      "limit": 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is "healthcare_aggregation_key", the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
+    },
+  ],
+  "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
+  "imageQuery": { # Specifies the image query input. # Raw image query.
+    "imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
+  },
+  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn't specified, the query language code is automatically detected, which may not be accurate.
+  "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
+    "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED.
+    "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
+      "A String",
+    ],
+  },
+  "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "oneBoxPageSize": 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
+  "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
+  "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
+  "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  "params": { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: "au"` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
+    "a_key": "",
+  },
+  "personalizationSpec": { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
+    "mode": "A String", # The personalization mode of the search request. Defaults to Mode.AUTO.
+  },
+  "query": "A String", # Raw search query.
+  "queryExpansionSpec": { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
+    "condition": "A String", # The condition under which query expansion should occur. Default to Condition.DISABLED.
+    "pinUnexpandedResults": True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
+  },
+  "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joined by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of a location, such as "US" and "419". For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
+  "relevanceThreshold": "A String", # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information.
+  "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search.
+  "searchAsYouTypeSpec": { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
+    "condition": "A String", # The condition under which search as you type should occur. Default to Condition.DISABLED.
+  },
+  "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
+  "session": "A String", # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query standing. I.e., if the first query is "How did Alphabet do in 2022?" and the current query is "How about 2023?", the current query will be interpreted as "How did Alphabet do in 2023?". Example #2 (coordination between /search API calls and /answer API calls): 1. Call /search API with the auto-session mode (see below). 2. Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Auto-session mode: when `projects/.../sessions/-` is used, a new session gets automatically created. Otherwise, users can use the create-session API to create a session manually. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
+  "sessionSpec": { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
+    "queryId": "A String", # If set, the search result gets stored to the "turn" specified by this query ID. Example: Let's say the session looks like this: session { name: ".../sessions/xxx" turns { query { text: "What is foo?" query_id: ".../questions/yyy" } answer: "Foo is ..." } turns { query { text: "How about bar then?" query_id: ".../questions/zzz" } } } The user can call /search API with a request like this: session: ".../sessions/xxx" session_spec { query_id: ".../questions/zzz" } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID & query ID.
+    "searchResultPersistenceCount": 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is simliar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
+  },
+  "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
+    "mode": "A String", # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
+  },
+  "userInfo": { # Information of an end user. # Information about the end user. Highly recommended for analytics. UserInfo.user_agent is used to deduce `device_type` for analytics.
+    "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
+    "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don't set for anonymous users. Always use a hashed value for this ID. Don't set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+  },
+  "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
+    "a_key": "A String",
+  },
+  "userPseudoId": "A String", # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchService.Search method.
+  "appliedControls": [ # Controls applied as part of the Control service.
+    "A String",
+  ],
+  "attributionToken": "A String", # A unique search token. This should be included in the UserEvent logs resulting from this search, which enables accurate attribution of search model performance. This also helps to identify a request during the customer support scenarios.
+  "correctedQuery": "A String", # Contains the spell corrected query, if found. If the spell correction type is AUTOMATIC, then the search results are based on corrected_query. Otherwise the original query is used for search.
+  "facets": [ # Results of facets requested by user.
+    { # A facet result.
+      "dynamicFacet": True or False, # Whether the facet is dynamically generated.
+      "key": "A String", # The key for this facet. For example, `"colors"` or `"price"`. It matches SearchRequest.FacetSpec.FacetKey.key.
+      "values": [ # The facet values for this field.
+        { # A facet value which contains value names and their count.
+          "count": "A String", # Number of items that have this facet value.
+          "interval": { # A floating point interval. # Interval value for a facet, such as 10, 20) for facet "price". It matches [SearchRequest.FacetSpec.FacetKey.intervals.
+            "exclusiveMaximum": 3.14, # Exclusive upper bound.
+            "exclusiveMinimum": 3.14, # Exclusive lower bound.
+            "maximum": 3.14, # Inclusive upper bound.
+            "minimum": 3.14, # Inclusive lower bound.
+          },
+          "value": "A String", # Text value of a facet, such as "Black" for facet "colors".
+        },
+      ],
+    },
+  ],
+  "geoSearchDebugInfo": [
+    { # Debug information specifically related to forward geocoding issues arising from Geolocation Search.
+      "errorMessage": "A String", # The error produced.
+      "originalAddressQuery": "A String", # The address from which forward geocoding ingestion produced issues.
+    },
+  ],
+  "guidedSearchResult": { # Guided search result. The guided search helps user to refine the search results and narrow down to the real needs from a broaded search results. # Guided search result.
+    "followUpQuestions": [ # Suggested follow-up questions.
+      "A String",
+    ],
+    "refinementAttributes": [ # A list of ranked refinement attributes.
+      { # Useful attribute for search result refinements.
+        "attributeKey": "A String", # Attribute key used to refine the results. For example, `"movie_type"`.
+        "attributeValue": "A String", # Attribute value used to refine the results. For example, `"drama"`.
+      },
+    ],
+  },
+  "naturalLanguageQueryUnderstandingInfo": { # Information describing what natural language understanding was done on the input query. # Natural language query understanding information for the returned results.
+    "extractedFilters": "A String", # The filters that were extracted from the input query.
+    "rewrittenQuery": "A String", # Rewritten input query minus the extracted filters.
+    "structuredExtractedFilter": { # The filters that were extracted from the input query represented in a structured form. # The filters that were extracted from the input query represented in a structured form.
+      "expression": { # The expression denoting the filter that was extracted from the input query. # The expression denoting the filter that was extracted from the input query in a structured form. It can be a simple expression denoting a single string, numerical or geolocation constraint or a compound expression which is a combination of multiple expressions connected using logical (OR and AND) operators.
+        "andExpr": { # Logical `And` operator. # Logical "And" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ANDed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "geolocationConstraint": { # Constraint of a geolocation field. Name of the geolocation field as defined in the schema. # Geolocation constraint expression.
+          "address": "A String", # The reference address that was inferred from the input query. The proximity of the reference address to the geolocation field will be used to filter the results.
+          "fieldName": "A String", # The name of the geolocation field as defined in the schema.
+          "latitude": 3.14, # The latitude of the geolocation inferred from the input query.
+          "longitude": 3.14, # The longitude of the geolocation inferred from the input query.
+          "radiusInMeters": 3.14, # The radius in meters around the address. The record is returned if the location of the geolocation field is within the radius.
+        },
+        "numberConstraint": { # Constraint expression of a number field. Example: price < 100. # Numerical constraint expression.
+          "comparison": "A String", # The comparison operation performed between the field value and the value specified in the constraint.
+          "fieldName": "A String", # Name of the numerical field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "value": 3.14, # The value specified in the numerical constraint.
+        },
+        "orExpr": { # Logical `Or` operator. # Logical "Or" compound operator connecting multiple expressions.
+          "expressions": [ # The expressions that were ORed together.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaSearchResponseNaturalLanguageQueryUnderstandingInfoStructuredExtractedFilterExpression
+          ],
+        },
+        "stringConstraint": { # Constraint expression of a string field. # String constraint expression.
+          "fieldName": "A String", # Name of the string field as defined in the schema.
+          "querySegment": "A String", # Identifies the keywords within the search query that match a filter.
+          "values": [ # Values of the string field. The record will only be returned if the field value matches one of the values specified here.
+            "A String",
+          ],
+        },
+      },
+    },
+  },
+  "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "oneBoxResults": [ # A list of One Box results. There can be multiple One Box results of different types.
+    { # OneBoxResult is a holder for all results of a specific type that we want to display differently in the UI.
+      "oneBoxType": "A String", # The type of One Box result.
+      "searchResults": [ # The search results for this One Box.
+        { # Represents the search results.
+          "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+            "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+              "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+              ],
+              "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+                # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+              ],
+            },
+            "content": "A String", # Content is a string from a document (parsed content).
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+              "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+                "a_key": "", # Properties of the object.
+              },
+              "title": "A String", # Title of the document.
+              "uri": "A String", # Uri of the document.
+            },
+            "id": "A String", # Unique chunk ID of the current chunk.
+            "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+              "pageEnd": 42, # The end page of the chunk.
+              "pageStart": 42, # The start page of the chunk.
+            },
+            "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+          },
+          "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+            "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+              "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+              "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+              "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+            },
+            "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+              "a_key": "", # Properties of the object.
+            },
+            "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+              "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+                { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+                  "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+                  "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                    {
+                      "a_key": "", # Properties of the object. Contains field @type with type URL.
+                    },
+                  ],
+                  "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+                },
+              ],
+              "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+            },
+            "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+            "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+            "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+            "schemaId": "A String", # The identifier of the schema located in the same data store.
+            "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+              "a_key": "", # Properties of the object.
+            },
+          },
+          "id": "A String", # Document.id of the searched Document.
+          "modelScores": { # Google provided available scores.
+            "a_key": { # Double list.
+              "values": [ # Double values.
+                3.14,
+              ],
+            },
+          },
+        },
+      ],
+    },
+  ],
+  "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results.
+    "expandedQuery": True or False, # Bool describing whether query expansion has occurred.
+    "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true.
+  },
+  "redirectUri": "A String", # The URI of a customer-defined redirect page. If redirect action is triggered, no search is performed, and only redirect_uri and attribution_token are set in the response.
+  "results": [ # A list of matched documents. The order represents the ranking.
+    { # Represents the search results.
+      "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+        "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+          "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+          ],
+          "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+            # Object with schema name: GoogleCloudDiscoveryengineV1betaChunk
+          ],
+        },
+        "content": "A String", # Content is a string from a document (parsed content).
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "documentMetadata": { # Document metadata contains the information of the document of the current chunk. # Metadata of the document from the current chunk.
+          "structData": { # Data representation. The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+            "a_key": "", # Properties of the object.
+          },
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Uri of the document.
+        },
+        "id": "A String", # Unique chunk ID of the current chunk.
+        "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+          "pageEnd": 42, # The end page of the chunk.
+          "pageStart": 42, # The start page of the chunk.
+        },
+        "relevanceScore": 3.14, # Output only. Represents the relevance score based on similarity. Higher score indicates higher chunk relevance. The score is in range [-1.0, 1.0]. Only populated on SearchService.SearchResponse.
+      },
+      "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as `retrievable` are populated.
+        "content": { # Unstructured data linked to this document. # The unstructured data linked to this document. Content must be set if this document is under a `CONTENT_REQUIRED` data store.
+          "mimeType": "A String", # The MIME type of the content. Supported types: * `application/pdf` (PDF, only native PDFs are supported for now) * `text/html` (HTML) * `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX) * `application/vnd.openxmlformats-officedocument.presentationml.presentation` (PPTX) * `text/plain` (TXT) See https://www.iana.org/assignments/media-types/media-types.xhtml.
+          "rawBytes": "A String", # The content represented as a stream of bytes. The maximum length is 1,000,000 bytes (1 MB / ~0.95 MiB). Note: As with all `bytes` fields, this field is represented as pure binary in Protocol Buffers and base64-encoded string in JSON. For example, `abc123!?$*&()'-=@~` should be represented as `YWJjMTIzIT8kKiYoKSctPUB+` in JSON. See https://developers.google.com/protocol-buffers/docs/proto3#json.
+          "uri": "A String", # The URI of the content. Only Cloud Storage URIs (e.g. `gs://bucket-name/path/to/file`) are supported. The maximum file size is 2.5 MB for text-based formats, 200 MB for other formats.
+        },
+        "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
+          "a_key": "", # Properties of the object.
+        },
+        "id": "A String", # Immutable. The identifier of the document. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "indexStatus": { # Index status of the document. # Output only. The index status of the document. * If document is indexed successfully, the index_time field is populated. * Otherwise, if document is not indexed due to errors, the error_samples field is populated. * Otherwise, index_status is unset.
+          "errorSamples": [ # A sample of errors encountered while indexing the document. If this field is populated, the document is not indexed due to errors.
+            { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+            },
+          ],
+          "indexTime": "A String", # The time when the document was indexed. If this field is populated, it means the document has been indexed.
+        },
+        "indexTime": "A String", # Output only. The last time the document was indexed. If this field is set, the document could be returned in search results. This field is OUTPUT_ONLY. If this field is not populated, it means the document has never been indexed.
+        "jsonData": "A String", # The JSON string representation of the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+        "name": "A String", # Immutable. The full resource name of the document. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+        "parentDocumentId": "A String", # The identifier of the parent document. Currently supports at most two level document hierarchy. Id should conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) standard with a length limit of 63 characters.
+        "schemaId": "A String", # The identifier of the schema located in the same data store.
+        "structData": { # The structured JSON data for the document. It should conform to the registered Schema or an `INVALID_ARGUMENT` error is thrown.
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "id": "A String", # Document.id of the searched Document.
+      "modelScores": { # Google provided available scores.
+        "a_key": { # Double list.
+          "values": [ # Double values.
+            3.14,
+          ],
+        },
+      },
+    },
+  ],
+  "sessionInfo": { # Information about the session. # Session information. Only set if SearchRequest.session is provided. See its description for more details.
+    "name": "A String", # Name of the session. If the auto-session mode is used (when SearchRequest.session ends with "-"), this field holds the newly generated session name.
+    "queryId": "A String", # Query ID that corresponds to this search API call. One session can have multiple turns, each with a unique query ID. By specifying the session name and this query ID in the Answer API call, the answer generation happens in the context of the search results from this search call.
+  },
+  "summary": { # Summary of the top N search results specified by the summary spec. # A summary as part of the search results. This field is only returned if SearchRequest.ContentSearchSpec.summary_spec is set.
+    "safetyAttributes": { # Safety Attribute categories and their associated confidence scores. # A collection of Safety Attribute categories and their associated confidence scores.
+      "categories": [ # The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.
+        "A String",
+      ],
+      "scores": [ # The confidence scores of the each category, higher value means higher confidence. Order matches the Categories.
+        3.14,
+      ],
+    },
+    "summarySkippedReasons": [ # Additional summary-skipped reasons. This provides the reason for ignored cases. If nothing is skipped, this field is not set.
+      "A String",
+    ],
+    "summaryText": "A String", # The summary content.
+    "summaryWithMetadata": { # Summary with metadata information. # Summary with metadata information.
+      "citationMetadata": { # Citation metadata. # Citation metadata for given summary.
+        "citations": [ # Citations for segments.
+          { # Citation info for a segment.
+            "endIndex": "A String", # End of the attributed segment, exclusive.
+            "sources": [ # Citation sources for the attributed segment.
+              { # Citation source.
+                "referenceIndex": "A String", # Document reference index from SummaryWithMetadata.references. It is 0-indexed and the value will be zero if the reference_index is not set explicitly.
+              },
+            ],
+            "startIndex": "A String", # Index indicates the start of the segment, measured in bytes/unicode.
+          },
+        ],
+      },
+      "references": [ # Document References.
+        { # Document reference.
+          "chunkContents": [ # List of cited chunk contents derived from document content.
+            { # Chunk content.
+              "content": "A String", # Chunk textual content.
+              "pageIdentifier": "A String", # Page identifier.
+            },
+          ],
+          "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
+          "title": "A String", # Title of the document.
+          "uri": "A String", # Cloud Storage or HTTP uri for the document.
+        },
+      ],
+      "summary": "A String", # Summary text with no citation information.
+    },
+  },
+  "totalSize": 42, # The estimated total count of matched items irrespective of pagination. The count of results returned by pagination may be less than the total_size that matches.
+}
+
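For orientation, a minimal Python sketch of calling this lite search method with the dynamic client is shown below. It is illustrative only, not part of the generated reference: the API key, project, data store, and serving-config IDs are placeholders, the resource path assumes the `collections.dataStores.servingConfigs` surface on the `v1beta` version, and only a few of the request fields documented above are populated.

# Illustrative sketch only: issue a searchLite call with the
# google-api-python-client dynamic client, assuming API-key authentication
# and placeholder resource IDs.
from googleapiclient import discovery

client = discovery.build("discoveryengine", "v1beta", developerKey="YOUR_API_KEY")

serving_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE_ID/servingConfigs/default_serving_config"
)

response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .servingConfigs()
    .searchLite(
        servingConfig=serving_config,
        body={"query": "return policy", "pageSize": 10, "languageCode": "en-US"},
    )
    .execute()
)

# Each entry of "results" carries the matched document snippet described above.
for result in response.get("results", []):
    print(result["document"]["name"])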
+
+ searchLite_next()
+Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
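As a sketch of how the pagination helper above might be used, under the same placeholder assumptions as the earlier example (`client` and `serving_config` as defined there; the servingConfigs resource chain is an assumption):

# Illustrative pagination loop: searchLite_next() builds the follow-up request
# from the previous request/response pair and returns None when no pages remain.
svc = client.projects().locations().collections().dataStores().servingConfigs()
request = svc.searchLite(servingConfig=serving_config, body={"query": "return policy"})
while request is not None:
    response = request.execute()
    for result in response.get("results", []):
        print(result["document"]["id"])
    # Positional arguments: previous_request, previous_response.
    request = svc.searchLite_next(request, response)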
search_next()
Retrieves the next page of results.
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.userEvents.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.userEvents.html
index 961c4c7ccb..ab61b8b4e3 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.userEvents.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.userEvents.html
@@ -188,7 +188,7 @@ 

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -349,7 +349,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -435,7 +435,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.userEvents.html b/docs/dyn/discoveryengine_v1beta.projects.locations.userEvents.html index 172b2c0d42..b9d30f0310 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.userEvents.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.userEvents.html @@ -158,7 +158,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. @@ -244,7 +244,7 @@

Method Details

], "engine": "A String", # The Engine resource name, in the form of `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}`. Optional. Only required for Engine produced user events. For example, user events from blended search. "eventTime": "A String", # Only required for UserEventService.ImportUserEvents method. Timestamp of when the user event happened. - "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. + "eventType": "A String", # Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc. "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. One example is for `search` events, the associated SearchRequest may contain a filter expression in SearchRequest.filter conforming to https://google.aip.dev/160#filtering. Similarly, for `view-item-list` events that are generated from a RecommendRequest, this field may be populated directly from RecommendRequest.filter conforming to https://google.aip.dev/160#filtering. The value must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. "mediaInfo": { # Media-specific user event information. # Media-specific info. "mediaProgressDuration": "A String", # The media progress time in seconds, if applicable. For example, if the end user has finished 90 seconds of a playback video, then MediaInfo.media_progress_duration.seconds should be set to 90. diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json index 25022384f0..69663127f2 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json @@ -1752,6 +1752,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. 
This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/dataStores/{dataStoresId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.dataStores.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/dataStores/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1SearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1SearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -3283,6 +3311,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1SearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1SearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -4961,6 +5017,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. 
Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/dataStores/{dataStoresId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.dataStores.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/dataStores/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1SearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1SearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -6006,9 +6090,177 @@ } } }, -"revision": "20241020", +"revision": "20241025", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { +"GoogleApiDistribution": { +"description": "`Distribution` contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets. The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths. Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the `mean` and `sum_of_squared_deviation` fields meaningless.", +"id": "GoogleApiDistribution", +"properties": { +"bucketCounts": { +"description": "The number of values in each bucket of the histogram, as described in `bucket_options`. If the distribution does not have a histogram, then omit this field. If there is a histogram, then the sum of the values in `bucket_counts` must equal the value in the `count` field of the distribution. If present, `bucket_counts` should contain N values, where N is the number of buckets specified in `bucket_options`. If you supply fewer than N values, the remaining values are assumed to be 0. The order of the values in `bucket_counts` follows the bucket numbering schemes described for the three bucket types. The first value must be the count for the underflow bucket (number 0). The next N-2 values are the counts for the finite buckets (number 1 through N-2). 
The N'th value in `bucket_counts` is the count for the overflow bucket (number N-1).", +"items": { +"format": "int64", +"type": "string" +}, +"type": "array" +}, +"bucketOptions": { +"$ref": "GoogleApiDistributionBucketOptions", +"description": "Defines the histogram bucket boundaries. If the distribution does not contain a histogram, then omit this field." +}, +"count": { +"description": "The number of values in the population. Must be non-negative. This value must equal the sum of the values in `bucket_counts` if a histogram is provided.", +"format": "int64", +"type": "string" +}, +"exemplars": { +"description": "Must be in increasing order of `value` field.", +"items": { +"$ref": "GoogleApiDistributionExemplar" +}, +"type": "array" +}, +"mean": { +"description": "The arithmetic mean of the values in the population. If `count` is zero then this field must be zero.", +"format": "double", +"type": "number" +}, +"range": { +"$ref": "GoogleApiDistributionRange", +"description": "If specified, contains the range of the population values. The field must not be present if the `count` is zero." +}, +"sumOfSquaredDeviation": { +"description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is: Sum[i=1..n]((x_i - mean)^2) Knuth, \"The Art of Computer Programming\", Vol. 2, page 232, 3rd edition describes Welford's method for accumulating this sum in one pass. If `count` is zero then this field must be zero.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptions": { +"description": "`BucketOptions` describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. `BucketOptions` does not include the number of values in each bucket. A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", +"id": "GoogleApiDistributionBucketOptions", +"properties": { +"explicitBuckets": { +"$ref": "GoogleApiDistributionBucketOptionsExplicit", +"description": "The explicit buckets." +}, +"exponentialBuckets": { +"$ref": "GoogleApiDistributionBucketOptionsExponential", +"description": "The exponential buckets." +}, +"linearBuckets": { +"$ref": "GoogleApiDistributionBucketOptionsLinear", +"description": "The linear bucket." +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptionsExplicit": { +"description": "Specifies a set of buckets with arbitrary widths. There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following boundaries: Upper bound (0 <= i < N-1): bounds[i] Lower bound (1 <= i < N); bounds[i - 1] The `bounds` field must contain at least one element. 
If `bounds` has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", +"id": "GoogleApiDistributionBucketOptionsExplicit", +"properties": { +"bounds": { +"description": "The values must be monotonically increasing.", +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptionsExponential": { +"description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket. There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the following boundaries: Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).", +"id": "GoogleApiDistributionBucketOptionsExponential", +"properties": { +"growthFactor": { +"description": "Must be greater than 1.", +"format": "double", +"type": "number" +}, +"numFiniteBuckets": { +"description": "Must be greater than 0.", +"format": "int32", +"type": "integer" +}, +"scale": { +"description": "Must be greater than 0.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptionsLinear": { +"description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket. There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the following boundaries: Upper bound (0 <= i < N-1): offset + (width * i). Lower bound (1 <= i < N): offset + (width * (i - 1)).", +"id": "GoogleApiDistributionBucketOptionsLinear", +"properties": { +"numFiniteBuckets": { +"description": "Must be greater than 0.", +"format": "int32", +"type": "integer" +}, +"offset": { +"description": "Lower bound of the first bucket.", +"format": "double", +"type": "number" +}, +"width": { +"description": "Must be greater than 0.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionExemplar": { +"description": "Exemplars are example points that may be used to annotate aggregated distribution values. They are metadata that gives information about a particular value added to a Distribution bucket, such as a trace ID that was active when a value was added. They may contain further information, such as a example values and timestamps, origin, etc.", +"id": "GoogleApiDistributionExemplar", +"properties": { +"attachments": { +"description": "Contextual information about the example value. Examples are: Trace: type.googleapis.com/google.monitoring.v3.SpanContext Literal string: type.googleapis.com/google.protobuf.StringValue Labels dropped during aggregation: type.googleapis.com/google.monitoring.v3.DroppedLabels There may be only a single attachment of any given message type in a single exemplar, and this is enforced by the system.", +"items": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"type": "object" +}, +"type": "array" +}, +"timestamp": { +"description": "The observation (sampling) time of the above value.", +"format": "google-datetime", +"type": "string" +}, +"value": { +"description": "Value of the exemplar point. 
This value determines to which bucket the exemplar belongs.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionRange": { +"description": "The range of the population values.", +"id": "GoogleApiDistributionRange", +"properties": { +"max": { +"description": "The maximum of the population values.", +"format": "double", +"type": "number" +}, +"min": { +"description": "The minimum of the population values.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, "GoogleApiHttpBody": { "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", "id": "GoogleApiHttpBody", @@ -6036,6 +6288,64 @@ }, "type": "object" }, +"GoogleApiMetric": { +"description": "A specific metric, identified by specifying values for all of the labels of a `MetricDescriptor`.", +"id": "GoogleApiMetric", +"properties": { +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "The set of label values that uniquely identify this metric. All labels listed in the `MetricDescriptor` must be assigned values.", +"type": "object" +}, +"type": { +"description": "An existing metric type, see google.api.MetricDescriptor. For example, `custom.googleapis.com/invoice/paid/amount`.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleApiMonitoredResource": { +"description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The `type` field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the `labels` field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for `\"gce_instance\"` has labels `\"project_id\"`, `\"instance_id\"` and `\"zone\"`: { \"type\": \"gce_instance\", \"labels\": { \"project_id\": \"my-project\", \"instance_id\": \"12345678901234\", \"zone\": \"us-central1-a\" }}", +"id": "GoogleApiMonitoredResource", +"properties": { +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. 
For example, Compute Engine VM instances use the labels `\"project_id\"`, `\"instance_id\"`, and `\"zone\"`.", +"type": "object" +}, +"type": { +"description": "Required. The monitored resource type. This field must match the `type` field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleApiMonitoredResourceMetadata": { +"description": "Auxiliary metadata for a MonitoredResource object. MonitoredResource objects contain the minimum set of information to uniquely identify a monitored resource instance. There is some other useful auxiliary metadata. Monitoring and Logging use an ingestion pipeline to extract metadata for cloud resources of all types, and store the metadata in this message.", +"id": "GoogleApiMonitoredResourceMetadata", +"properties": { +"systemLabels": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example: { \"name\": \"my-test-instance\", \"security_group\": [\"a\", \"b\", \"c\"], \"spot_instance\": false }", +"type": "object" +}, +"userLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "Output only. A map of user-defined metadata labels.", +"type": "object" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineLoggingErrorContext": { "description": "A description of the context in which an error occurred.", "id": "GoogleCloudDiscoveryengineLoggingErrorContext", @@ -6346,6 +6656,10 @@ "description": "Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method.", "type": "boolean" }, +"groundingSpec": { +"$ref": "GoogleCloudDiscoveryengineV1AnswerQueryRequestGroundingSpec", +"description": "Optional. Grounding specification." +}, "query": { "$ref": "GoogleCloudDiscoveryengineV1Query", "description": "Required. Current user query." @@ -6445,6 +6759,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1AnswerQueryRequestGroundingSpec": { +"description": "Grounding specification.", +"id": "GoogleCloudDiscoveryengineV1AnswerQueryRequestGroundingSpec", +"properties": { +"includeGroundingSupports": { +"description": "Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim.", +"type": "boolean" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1AnswerQueryRequestQueryUnderstandingSpec": { "description": "Query understanding specification.", "id": "GoogleCloudDiscoveryengineV1AnswerQueryRequestQueryUnderstandingSpec", @@ -11729,7 +12054,7 @@ "type": "string" }, "eventType": { -"description": "Required. User event type. 
Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.", +"description": "Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.", "type": "string" }, "filter": { @@ -12639,6 +12964,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries": { +"description": "The historical crawl rate timeseries data, used for monitoring.", +"id": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"properties": { +"qpsTimeSeries": { +"$ref": "GoogleMonitoringV3TimeSeries", +"description": "The QPS of the crawl rate." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaCreateDataStoreMetadata": { "description": "Metadata related to the progress of the DataStoreService.CreateDataStore operation. This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaCreateDataStoreMetadata", @@ -12971,6 +13307,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries": { +"description": "The historical dedicated crawl rate timeseries data, used for monitoring. Dedicated crawl is used by Vertex AI to crawl the user's website when dedicate crawl is set.", +"id": "GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries", +"properties": { +"autoRefreshCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's dedicated crawl rate time series of auto-refresh, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is set, and the crawl rate is for best effort use cases like refreshing urls periodically." +}, +"userTriggeredCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's dedicated crawl rate time series of user triggered crawl, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is set, and user triggered crawl rate is for deterministic use cases like crawling urls or sitemaps specified by users." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaDeleteDataStoreMetadata": { "description": "Metadata related to the progress of the DataStoreService.DeleteDataStore operation. 
This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaDeleteDataStoreMetadata", @@ -14161,6 +14512,55 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse": { +"description": "Response message for CrawlRateManagementService.ObtainCrawlRate method. The response contains organcic or dedicated crawl rate time series data for monitoring, depending on whether dedicated crawl rate is set.", +"id": "GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse", +"properties": { +"dedicatedCrawlRateTimeSeries": { +"$ref": "GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries", +"description": "The historical dedicated crawl rate timeseries data, used for monitoring." +}, +"error": { +"$ref": "GoogleRpcStatus", +"description": "Errors from service when handling the request." +}, +"organicCrawlRateTimeSeries": { +"$ref": "GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries", +"description": "The historical organic crawl rate timeseries data, used for monitoring." +}, +"state": { +"description": "Output only. The state of the response.", +"enum": [ +"STATE_UNSPECIFIED", +"SUCCEEDED", +"FAILED" +], +"enumDescriptions": [ +"The state is unspecified.", +"The state is successful.", +"The state is failed." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries": { +"description": "The historical organic crawl rate timeseries data, used for monitoring. Organic crawl is auto-determined by Google to crawl the user's website when dedicate crawl is not set. Crawl rate is the QPS of crawl request Google sends to the user's website.", +"id": "GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries", +"properties": { +"googleOrganicCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Google's organic crawl rate time series, which is the sum of all googlebots' crawl rate. Please refer to https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers for more details about googlebots." +}, +"vertexAiOrganicCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's organic crawl rate time series, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is not set. Please refer to https://developers.google.com/search/docs/crawling-indexing/google-common-crawlers#google-cloudvertexbot for more details about Google-CloudVertexBot." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaProject": { "description": "Metadata and configurations for a Google Cloud project in the service.", "id": "GoogleCloudDiscoveryengineV1alphaProject", @@ -18445,6 +18845,139 @@ }, "type": "object" }, +"GoogleMonitoringV3Point": { +"description": "A single data point in a time series.", +"id": "GoogleMonitoringV3Point", +"properties": { +"interval": { +"$ref": "GoogleMonitoringV3TimeInterval", +"description": "The time interval to which the data point applies. For `GAUGE` metrics, the start time is optional, but if it is supplied, it must equal the end time. For `DELTA` metrics, the start and end time should specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. 
For `CUMULATIVE` metrics, the start and end time should specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." +}, +"value": { +"$ref": "GoogleMonitoringV3TypedValue", +"description": "The value of the data point." +} +}, +"type": "object" +}, +"GoogleMonitoringV3TimeInterval": { +"description": "A time interval extending just after a start time through an end time. If the start time is the same as the end time, then the interval represents a single point in time.", +"id": "GoogleMonitoringV3TimeInterval", +"properties": { +"endTime": { +"description": "Required. The end of the time interval.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"GoogleMonitoringV3TimeSeries": { +"description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", +"id": "GoogleMonitoringV3TimeSeries", +"properties": { +"description": { +"description": "Input only. A detailed description of the time series that will be associated with the google.api.MetricDescriptor for the metric. Once set, this field cannot be changed through CreateTimeSeries.", +"type": "string" +}, +"metadata": { +"$ref": "GoogleApiMonitoredResourceMetadata", +"description": "Output only. The associated monitored resource metadata. When reading a time series, this field will include metadata labels that are explicitly named in the reduction. When creating a time series, this field is ignored." +}, +"metric": { +"$ref": "GoogleApiMetric", +"description": "The associated metric. A fully-specified metric used to identify the time series." +}, +"metricKind": { +"description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series. When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either `GAUGE` (the default) or `CUMULATIVE`.", +"enum": [ +"METRIC_KIND_UNSPECIFIED", +"GAUGE", +"DELTA", +"CUMULATIVE" +], +"enumDescriptions": [ +"Do not use this default value.", +"An instantaneous measurement of a value.", +"The change in a value during a time interval.", +"A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." +], +"type": "string" +}, +"points": { +"description": "The data points of this time series. When listing time series, points are returned in reverse time order. When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. 
If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.", +"items": { +"$ref": "GoogleMonitoringV3Point" +}, +"type": "array" +}, +"resource": { +"$ref": "GoogleApiMonitoredResource", +"description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data. For more information, see [Monitored resources for custom metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources)." +}, +"unit": { +"description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. This field can only be changed through CreateTimeSeries when it is empty.", +"type": "string" +}, +"valueType": { +"description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series. When creating a time series, this field is optional. If present, it must be the same as the type of the data in the `points` field.", +"enum": [ +"VALUE_TYPE_UNSPECIFIED", +"BOOL", +"INT64", +"DOUBLE", +"STRING", +"DISTRIBUTION", +"MONEY" +], +"enumDescriptions": [ +"Do not use this default value.", +"The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", +"The value is a signed 64-bit integer.", +"The value is a double precision floating point number.", +"The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", +"The value is a `Distribution`.", +"The value is money." +], +"type": "string" +} +}, +"type": "object" +}, +"GoogleMonitoringV3TypedValue": { +"description": "A single strongly-typed value.", +"id": "GoogleMonitoringV3TypedValue", +"properties": { +"boolValue": { +"description": "A Boolean value: `true` or `false`.", +"type": "boolean" +}, +"distributionValue": { +"$ref": "GoogleApiDistribution", +"description": "A distribution value." +}, +"doubleValue": { +"description": "A 64-bit double-precision floating-point number. Its magnitude is approximately \u00b110\u00b1300 and it has 16 significant digits of precision.", +"format": "double", +"type": "number" +}, +"int64Value": { +"description": "A 64-bit integer. Its range is approximately \u00b19.2x1018.", +"format": "int64", +"type": "string" +}, +"stringValue": { +"description": "A variable-length string value.", +"type": "string" +} +}, +"type": "object" +}, "GoogleProtobufEmpty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "GoogleProtobufEmpty", diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json index fbe7b9ed17..80d1b19ebb 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json @@ -1316,6 +1316,38 @@ } } }, +"completionConfig": { +"methods": { +"completeQuery": { +"description": "Completes the user input with advanced keyword suggestions.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/dataStores/{dataStoresId}/completionConfig:completeQuery", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.dataStores.completionConfig.completeQuery", +"parameterOrder": [ +"completionConfig" +], +"parameters": { +"completionConfig": { +"description": "Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/dataStores/[^/]+/completionConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+completionConfig}:completeQuery", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "completionSuggestions": { "methods": { "import": { @@ -2309,6 +2341,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/dataStores/{dataStoresId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.dataStores.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. 
This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/dataStores/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -3470,6 +3530,38 @@ } }, "resources": { +"completionConfig": { +"methods": { +"completeQuery": { +"description": "Completes the user input with advanced keyword suggestions.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/completionConfig:completeQuery", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.completionConfig.completeQuery", +"parameterOrder": [ +"completionConfig" +], +"parameters": { +"completionConfig": { +"description": "Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/completionConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+completionConfig}:completeQuery", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "controls": { "methods": { "create": { @@ -4072,6 +4164,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. 
This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -5134,6 +5254,38 @@ } } }, +"completionConfig": { +"methods": { +"completeQuery": { +"description": "Completes the user input with advanced keyword suggestions.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/dataStores/{dataStoresId}/completionConfig:completeQuery", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.dataStores.completionConfig.completeQuery", +"parameterOrder": [ +"completionConfig" +], +"parameters": { +"completionConfig": { +"description": "Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/dataStores/[^/]+/completionConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+completionConfig}:completeQuery", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "completionSuggestions": { "methods": { "import": { @@ -6026,6 +6178,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/dataStores/{dataStoresId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.dataStores.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. 
This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/dataStores/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -7682,7 +7862,7 @@ } } }, -"revision": "20241020", +"revision": "20241025", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -7880,6 +8060,24 @@ }, "type": "object" }, +"GoogleApiMetric": { +"description": "A specific metric, identified by specifying values for all of the labels of a `MetricDescriptor`.", +"id": "GoogleApiMetric", +"properties": { +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "The set of label values that uniquely identify this metric. All labels listed in the `MetricDescriptor` must be assigned values.", +"type": "object" +}, +"type": { +"description": "An existing metric type, see google.api.MetricDescriptor. For example, `custom.googleapis.com/invoice/paid/amount`.", +"type": "string" +} +}, +"type": "object" +}, "GoogleApiMonitoredResource": { "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The `type` field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the `labels` field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for `\"gce_instance\"` has labels `\"project_id\"`, `\"instance_id\"` and `\"zone\"`: { \"type\": \"gce_instance\", \"labels\": { \"project_id\": \"my-project\", \"instance_id\": \"12345678901234\", \"zone\": \"us-central1-a\" }}", "id": "GoogleApiMonitoredResource", @@ -7898,6 +8096,28 @@ }, "type": "object" }, +"GoogleApiMonitoredResourceMetadata": { +"description": "Auxiliary metadata for a MonitoredResource object. MonitoredResource objects contain the minimum set of information to uniquely identify a monitored resource instance. There is some other useful auxiliary metadata. Monitoring and Logging use an ingestion pipeline to extract metadata for cloud resources of all types, and store the metadata in this message.", +"id": "GoogleApiMonitoredResourceMetadata", +"properties": { +"systemLabels": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example: { \"name\": \"my-test-instance\", \"security_group\": [\"a\", \"b\", \"c\"], \"spot_instance\": false }", +"type": "object" +}, +"userLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "Output only. 
A map of user-defined metadata labels.", +"type": "object" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineLoggingErrorContext": { "description": "A description of the context in which an error occurred.", "id": "GoogleCloudDiscoveryengineLoggingErrorContext", @@ -9654,6 +9874,236 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequest": { +"description": "Request message for CompletionService.AdvancedCompleteQuery method. .", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequest", +"properties": { +"boostSpec": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpec", +"description": "Optional. Specification to boost suggestions matching the condition." +}, +"includeTailSuggestions": { +"description": "Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.", +"type": "boolean" +}, +"query": { +"description": "Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called \"zero prefix\" feature, which returns user's recently searched queries given the empty query.", +"type": "string" +}, +"queryModel": { +"description": "Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.", +"type": "string" +}, +"suggestionTypes": { +"description": "Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.", +"items": { +"enum": [ +"SUGGESTION_TYPE_UNSPECIFIED", +"QUERY", +"PEOPLE", +"CONTENT", +"RECENT_SEARCH", +"GOOGLE_WORKSPACE" +], +"enumDescriptions": [ +"Default value.", +"Returns query suggestions.", +"Returns people suggestions.", +"Returns content suggestions.", +"Returns recent search suggestions.", +"Returns Google Workspace suggestions." +], +"type": "string" +}, +"type": "array" +}, +"userInfo": { +"$ref": "GoogleCloudDiscoveryengineV1alphaUserInfo", +"description": "Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info." +}, +"userPseudoId": { +"description": "A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. 
This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpec": { +"description": "Specification to boost suggestions based on the condtion of the suggestion.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpec", +"properties": { +"conditionBoostSpecs": { +"description": "Condition boost specifications. If a suggestion matches multiple conditions in the specifictions, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpecConditionBoostSpec" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpecConditionBoostSpec": { +"description": "Boost applies to suggestions which match a condition.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpecConditionBoostSpec", +"properties": { +"boost": { +"description": "Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.", +"format": "float", +"type": "number" +}, +"condition": { +"description": "An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY(\"en\", \"fr\"))`", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponse": { +"description": "Response message for CompletionService.AdvancedCompleteQuery method.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponse", +"properties": { +"contentSuggestions": { +"description": "Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseContentSuggestion" +}, +"type": "array" +}, +"peopleSuggestions": { +"description": "Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponsePersonSuggestion" +}, +"type": "array" +}, +"querySuggestions": { +"description": "Results of the matched query suggestions. The result list is ordered and the first result is a top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseQuerySuggestion" +}, +"type": "array" +}, +"recentSearchSuggestions": { +"description": "Results of the matched \"recent search\" suggestions. 
The result list is ordered and the first result is the top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseRecentSearchSuggestion" +}, +"type": "array" +}, +"tailMatchTriggered": { +"description": "True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.", +"type": "boolean" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseContentSuggestion": { +"description": "Suggestions as content.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseContentSuggestion", +"properties": { +"contentType": { +"description": "The type of the content suggestion.", +"enum": [ +"CONTENT_TYPE_UNSPECIFIED", +"GOOGLE_WORKSPACE", +"THIRD_PARTY" +], +"enumDescriptions": [ +"Default value.", +"The suggestion is from a Google Workspace source.", +"The suggestion is from a third party source." +], +"type": "string" +}, +"dataStore": { +"description": "The name of the dataStore that this suggestion belongs to.", +"type": "string" +}, +"document": { +"$ref": "GoogleCloudDiscoveryengineV1alphaDocument", +"description": "The document data snippet in the suggestion. Only a subset of fields will be populated." +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponsePersonSuggestion": { +"description": "Suggestions as people.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponsePersonSuggestion", +"properties": { +"dataStore": { +"description": "The name of the dataStore that this suggestion belongs to.", +"type": "string" +}, +"document": { +"$ref": "GoogleCloudDiscoveryengineV1alphaDocument", +"description": "The document data snippet in the suggestion. Only a subset of fields is populated." +}, +"personType": { +"description": "The type of the person.", +"enum": [ +"PERSON_TYPE_UNSPECIFIED", +"CLOUD_IDENTITY", +"THIRD_PARTY_IDENTITY" +], +"enumDescriptions": [ +"Default value.", +"The suggestion is from a GOOGLE_IDENTITY source.", +"The suggestion is from a THIRD_PARTY_IDENTITY source." +], +"type": "string" +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseQuerySuggestion": { +"description": "Suggestions as search queries.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseQuerySuggestion", +"properties": { +"completableFieldPaths": { +"description": "The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. 
This field is only populated for the document-completable model.", +"items": { +"type": "string" +}, +"type": "array" +}, +"dataStore": { +"description": "The name of the dataStore that this suggestion belongs to.", +"items": { +"type": "string" +}, +"type": "array" +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseRecentSearchSuggestion": { +"description": "Suggestions from recent search history.", +"id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryResponseRecentSearchSuggestion", +"properties": { +"recentSearchTime": { +"description": "The time when this recent rearch happened.", +"format": "google-datetime", +"type": "string" +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaAdvancedSiteSearchConfig": { "description": "Configuration data for advance site search.", "id": "GoogleCloudDiscoveryengineV1alphaAdvancedSiteSearchConfig", @@ -9854,6 +10304,10 @@ "description": "Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method.", "type": "boolean" }, +"groundingSpec": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAnswerQueryRequestGroundingSpec", +"description": "Optional. Grounding specification." +}, "query": { "$ref": "GoogleCloudDiscoveryengineV1alphaQuery", "description": "Required. Current user query." @@ -9953,6 +10407,31 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaAnswerQueryRequestGroundingSpec": { +"description": "Grounding specification.", +"id": "GoogleCloudDiscoveryengineV1alphaAnswerQueryRequestGroundingSpec", +"properties": { +"filteringLevel": { +"description": "Optional. Specifies whether to enable the filtering based on grounding score and at what level.", +"enum": [ +"FILTERING_LEVEL_UNSPECIFIED", +"FILTERING_LEVEL_LOW", +"FILTERING_LEVEL_HIGH" +], +"enumDescriptions": [ +"Default is no filter", +"Filter answers based on a low threshold.", +"Filter answers based on a high threshold." +], +"type": "string" +}, +"includeGroundingSupports": { +"description": "Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim.", +"type": "boolean" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaAnswerQueryRequestQueryUnderstandingSpec": { "description": "Query understanding specification.", "id": "GoogleCloudDiscoveryengineV1alphaAnswerQueryRequestQueryUnderstandingSpec", @@ -11756,6 +12235,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries": { +"description": "The historical crawl rate timeseries data, used for monitoring.", +"id": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"properties": { +"qpsTimeSeries": { +"$ref": "GoogleMonitoringV3TimeSeries", +"description": "The QPS of the crawl rate." 
+} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaCreateDataStoreMetadata": { "description": "Metadata related to the progress of the DataStoreService.CreateDataStore operation. This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaCreateDataStoreMetadata", @@ -12125,6 +12615,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries": { +"description": "The historical dedicated crawl rate timeseries data, used for monitoring. Dedicated crawl is used by Vertex AI to crawl the user's website when dedicate crawl is set.", +"id": "GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries", +"properties": { +"autoRefreshCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's dedicated crawl rate time series of auto-refresh, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is set, and the crawl rate is for best effort use cases like refreshing urls periodically." +}, +"userTriggeredCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's dedicated crawl rate time series of user triggered crawl, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is set, and user triggered crawl rate is for deterministic use cases like crawling urls or sitemaps specified by users." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaDeleteDataStoreMetadata": { "description": "Metadata related to the progress of the DataStoreService.DeleteDataStore operation. This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaDeleteDataStoreMetadata", @@ -14243,6 +14748,55 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse": { +"description": "Response message for CrawlRateManagementService.ObtainCrawlRate method. The response contains organcic or dedicated crawl rate time series data for monitoring, depending on whether dedicated crawl rate is set.", +"id": "GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse", +"properties": { +"dedicatedCrawlRateTimeSeries": { +"$ref": "GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries", +"description": "The historical dedicated crawl rate timeseries data, used for monitoring." +}, +"error": { +"$ref": "GoogleRpcStatus", +"description": "Errors from service when handling the request." +}, +"organicCrawlRateTimeSeries": { +"$ref": "GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries", +"description": "The historical organic crawl rate timeseries data, used for monitoring." +}, +"state": { +"description": "Output only. The state of the response.", +"enum": [ +"STATE_UNSPECIFIED", +"SUCCEEDED", +"FAILED" +], +"enumDescriptions": [ +"The state is unspecified.", +"The state is successful.", +"The state is failed." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries": { +"description": "The historical organic crawl rate timeseries data, used for monitoring. Organic crawl is auto-determined by Google to crawl the user's website when dedicate crawl is not set. 
Crawl rate is the QPS of crawl request Google sends to the user's website.", +"id": "GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries", +"properties": { +"googleOrganicCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Google's organic crawl rate time series, which is the sum of all googlebots' crawl rate. Please refer to https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers for more details about googlebots." +}, +"vertexAiOrganicCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's organic crawl rate time series, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is not set. Please refer to https://developers.google.com/search/docs/crawling-indexing/google-common-crawlers#google-cloudvertexbot for more details about Google-CloudVertexBot." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaPageInfo": { "description": "Detailed page information.", "id": "GoogleCloudDiscoveryengineV1alphaPageInfo", @@ -17456,7 +18010,7 @@ "type": "string" }, "eventType": { -"description": "Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.", +"description": "Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.", "type": "string" }, "filter": { @@ -20294,6 +20848,110 @@ }, "type": "object" }, +"GoogleMonitoringV3Point": { +"description": "A single data point in a time series.", +"id": "GoogleMonitoringV3Point", +"properties": { +"interval": { +"$ref": "GoogleMonitoringV3TimeInterval", +"description": "The time interval to which the data point applies. For `GAUGE` metrics, the start time is optional, but if it is supplied, it must equal the end time. For `DELTA` metrics, the start and end time should specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For `CUMULATIVE` metrics, the start and end time should specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." +}, +"value": { +"$ref": "GoogleMonitoringV3TypedValue", +"description": "The value of the data point." 
+} +}, +"type": "object" +}, +"GoogleMonitoringV3TimeInterval": { +"description": "A time interval extending just after a start time through an end time. If the start time is the same as the end time, then the interval represents a single point in time.", +"id": "GoogleMonitoringV3TimeInterval", +"properties": { +"endTime": { +"description": "Required. The end of the time interval.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"GoogleMonitoringV3TimeSeries": { +"description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", +"id": "GoogleMonitoringV3TimeSeries", +"properties": { +"description": { +"description": "Input only. A detailed description of the time series that will be associated with the google.api.MetricDescriptor for the metric. Once set, this field cannot be changed through CreateTimeSeries.", +"type": "string" +}, +"metadata": { +"$ref": "GoogleApiMonitoredResourceMetadata", +"description": "Output only. The associated monitored resource metadata. When reading a time series, this field will include metadata labels that are explicitly named in the reduction. When creating a time series, this field is ignored." +}, +"metric": { +"$ref": "GoogleApiMetric", +"description": "The associated metric. A fully-specified metric used to identify the time series." +}, +"metricKind": { +"description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series. When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either `GAUGE` (the default) or `CUMULATIVE`.", +"enum": [ +"METRIC_KIND_UNSPECIFIED", +"GAUGE", +"DELTA", +"CUMULATIVE" +], +"enumDescriptions": [ +"Do not use this default value.", +"An instantaneous measurement of a value.", +"The change in a value during a time interval.", +"A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." +], +"type": "string" +}, +"points": { +"description": "The data points of this time series. When listing time series, points are returned in reverse time order. When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.", +"items": { +"$ref": "GoogleMonitoringV3Point" +}, +"type": "array" +}, +"resource": { +"$ref": "GoogleApiMonitoredResource", +"description": "The associated monitored resource. 
Custom metrics can use only certain monitored resource types in their time series data. For more information, see [Monitored resources for custom metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources)." +}, +"unit": { +"description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. This field can only be changed through CreateTimeSeries when it is empty.", +"type": "string" +}, +"valueType": { +"description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series. When creating a time series, this field is optional. If present, it must be the same as the type of the data in the `points` field.", +"enum": [ +"VALUE_TYPE_UNSPECIFIED", +"BOOL", +"INT64", +"DOUBLE", +"STRING", +"DISTRIBUTION", +"MONEY" +], +"enumDescriptions": [ +"Do not use this default value.", +"The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", +"The value is a signed 64-bit integer.", +"The value is a double precision floating point number.", +"The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", +"The value is a `Distribution`.", +"The value is money." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleMonitoringV3TypedValue": { "description": "A single strongly-typed value.", "id": "GoogleMonitoringV3TypedValue", diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json index 3547769c7c..94501db3ff 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json @@ -854,6 +854,38 @@ } } }, +"completionConfig": { +"methods": { +"completeQuery": { +"description": "Completes the user input with advanced keyword suggestions.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/dataStores/{dataStoresId}/completionConfig:completeQuery", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.dataStores.completionConfig.completeQuery", +"parameterOrder": [ +"completionConfig" +], +"parameters": { +"completionConfig": { +"description": "Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/dataStores/[^/]+/completionConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+completionConfig}:completeQuery", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "completionSuggestions": { "methods": { "import": { @@ -1847,6 +1879,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. 
Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/dataStores/{dataStoresId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.dataStores.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/dataStores/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -2955,6 +3015,38 @@ } }, "resources": { +"completionConfig": { +"methods": { +"completeQuery": { +"description": "Completes the user input with advanced keyword suggestions.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/completionConfig:completeQuery", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.completionConfig.completeQuery", +"parameterOrder": [ +"completionConfig" +], +"parameters": { +"completionConfig": { +"description": "Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/completionConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+completionConfig}:completeQuery", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "controls": { "methods": { "create": { @@ -3557,6 +3649,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. 
If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -4438,6 +4558,38 @@ } } }, +"completionConfig": { +"methods": { +"completeQuery": { +"description": "Completes the user input with advanced keyword suggestions.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/dataStores/{dataStoresId}/completionConfig:completeQuery", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.dataStores.completionConfig.completeQuery", +"parameterOrder": [ +"completionConfig" +], +"parameters": { +"completionConfig": { +"description": "Required. The completion_config of the parent dataStore or engine resource name for which the completion is performed, such as `projects/*/locations/global/collections/default_collection/dataStores/*/completionConfig` `projects/*/locations/global/collections/default_collection/engines/*/completionConfig`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/dataStores/[^/]+/completionConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+completionConfig}:completeQuery", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "completionSuggestions": { "methods": { "import": { @@ -5330,6 +5482,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"searchLite": { +"description": "Performs a search. Similar to the SearchService.Search method, but a lite version that allows API key for authentication, where OAuth and IAM checks are not required. Only public website search is supported by this method. If data stores and engines not associated with public website search are specified, a `FAILED_PRECONDITION` error is returned. This method can be used for easy onboarding without having to implement an authentication backend. 
However, it is strongly recommended to use SearchService.Search instead with required OAuth and IAM checks to provide better data security.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/dataStores/{dataStoresId}/servingConfigs/{servingConfigsId}:searchLite", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.dataStores.servingConfigs.searchLite", +"parameterOrder": [ +"servingConfig" +], +"parameters": { +"servingConfig": { +"description": "Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/dataStores/[^/]+/servingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+servingConfig}:searchLite", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } }, @@ -6880,9 +7060,177 @@ } } }, -"revision": "20241020", +"revision": "20241025", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { +"GoogleApiDistribution": { +"description": "`Distribution` contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets. The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths. Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the `mean` and `sum_of_squared_deviation` fields meaningless.", +"id": "GoogleApiDistribution", +"properties": { +"bucketCounts": { +"description": "The number of values in each bucket of the histogram, as described in `bucket_options`. If the distribution does not have a histogram, then omit this field. If there is a histogram, then the sum of the values in `bucket_counts` must equal the value in the `count` field of the distribution. If present, `bucket_counts` should contain N values, where N is the number of buckets specified in `bucket_options`. If you supply fewer than N values, the remaining values are assumed to be 0. The order of the values in `bucket_counts` follows the bucket numbering schemes described for the three bucket types. The first value must be the count for the underflow bucket (number 0). The next N-2 values are the counts for the finite buckets (number 1 through N-2). The N'th value in `bucket_counts` is the count for the overflow bucket (number N-1).", +"items": { +"format": "int64", +"type": "string" +}, +"type": "array" +}, +"bucketOptions": { +"$ref": "GoogleApiDistributionBucketOptions", +"description": "Defines the histogram bucket boundaries. If the distribution does not contain a histogram, then omit this field." 
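The `searchLite` serving-config methods registered above can be exercised from the generated Python client. A minimal sketch, assuming the v1beta discovery document from this patch, an API key with access to a public-website data store, and placeholder resource names and query text:

```python
# Sketch only: serving config name, API key, and query are placeholders.
from googleapiclient.discovery import build

# searchLite accepts an API key; OAuth and IAM checks are not required.
client = build("discoveryengine", "v1beta", developerKey="YOUR_API_KEY")

serving_config = (
    "projects/PROJECT/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE/servingConfigs/default_serving_config"
)

response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .servingConfigs()
    # Request body follows GoogleCloudDiscoveryengineV1betaSearchRequest;
    # only the query field is shown here.
    .searchLite(servingConfig=serving_config, body={"query": "running shoes"})
    .execute()
)
print(response)
```

For anything beyond public website search, the regular SearchService.Search method with OAuth and IAM checks remains the recommended path.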
+}, +"count": { +"description": "The number of values in the population. Must be non-negative. This value must equal the sum of the values in `bucket_counts` if a histogram is provided.", +"format": "int64", +"type": "string" +}, +"exemplars": { +"description": "Must be in increasing order of `value` field.", +"items": { +"$ref": "GoogleApiDistributionExemplar" +}, +"type": "array" +}, +"mean": { +"description": "The arithmetic mean of the values in the population. If `count` is zero then this field must be zero.", +"format": "double", +"type": "number" +}, +"range": { +"$ref": "GoogleApiDistributionRange", +"description": "If specified, contains the range of the population values. The field must not be present if the `count` is zero." +}, +"sumOfSquaredDeviation": { +"description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is: Sum[i=1..n]((x_i - mean)^2) Knuth, \"The Art of Computer Programming\", Vol. 2, page 232, 3rd edition describes Welford's method for accumulating this sum in one pass. If `count` is zero then this field must be zero.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptions": { +"description": "`BucketOptions` describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. `BucketOptions` does not include the number of values in each bucket. A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", +"id": "GoogleApiDistributionBucketOptions", +"properties": { +"explicitBuckets": { +"$ref": "GoogleApiDistributionBucketOptionsExplicit", +"description": "The explicit buckets." +}, +"exponentialBuckets": { +"$ref": "GoogleApiDistributionBucketOptionsExponential", +"description": "The exponential buckets." +}, +"linearBuckets": { +"$ref": "GoogleApiDistributionBucketOptionsLinear", +"description": "The linear bucket." +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptionsExplicit": { +"description": "Specifies a set of buckets with arbitrary widths. There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following boundaries: Upper bound (0 <= i < N-1): bounds[i] Lower bound (1 <= i < N); bounds[i - 1] The `bounds` field must contain at least one element. 
If `bounds` has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", +"id": "GoogleApiDistributionBucketOptionsExplicit", +"properties": { +"bounds": { +"description": "The values must be monotonically increasing.", +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptionsExponential": { +"description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket. There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the following boundaries: Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).", +"id": "GoogleApiDistributionBucketOptionsExponential", +"properties": { +"growthFactor": { +"description": "Must be greater than 1.", +"format": "double", +"type": "number" +}, +"numFiniteBuckets": { +"description": "Must be greater than 0.", +"format": "int32", +"type": "integer" +}, +"scale": { +"description": "Must be greater than 0.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionBucketOptionsLinear": { +"description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket. There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the following boundaries: Upper bound (0 <= i < N-1): offset + (width * i). Lower bound (1 <= i < N): offset + (width * (i - 1)).", +"id": "GoogleApiDistributionBucketOptionsLinear", +"properties": { +"numFiniteBuckets": { +"description": "Must be greater than 0.", +"format": "int32", +"type": "integer" +}, +"offset": { +"description": "Lower bound of the first bucket.", +"format": "double", +"type": "number" +}, +"width": { +"description": "Must be greater than 0.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionExemplar": { +"description": "Exemplars are example points that may be used to annotate aggregated distribution values. They are metadata that gives information about a particular value added to a Distribution bucket, such as a trace ID that was active when a value was added. They may contain further information, such as a example values and timestamps, origin, etc.", +"id": "GoogleApiDistributionExemplar", +"properties": { +"attachments": { +"description": "Contextual information about the example value. Examples are: Trace: type.googleapis.com/google.monitoring.v3.SpanContext Literal string: type.googleapis.com/google.protobuf.StringValue Labels dropped during aggregation: type.googleapis.com/google.monitoring.v3.DroppedLabels There may be only a single attachment of any given message type in a single exemplar, and this is enforced by the system.", +"items": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"type": "object" +}, +"type": "array" +}, +"timestamp": { +"description": "The observation (sampling) time of the above value.", +"format": "google-datetime", +"type": "string" +}, +"value": { +"description": "Value of the exemplar point. 
This value determines to which bucket the exemplar belongs.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleApiDistributionRange": { +"description": "The range of the population values.", +"id": "GoogleApiDistributionRange", +"properties": { +"max": { +"description": "The maximum of the population values.", +"format": "double", +"type": "number" +}, +"min": { +"description": "The minimum of the population values.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, "GoogleApiHttpBody": { "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", "id": "GoogleApiHttpBody", @@ -6910,6 +7258,64 @@ }, "type": "object" }, +"GoogleApiMetric": { +"description": "A specific metric, identified by specifying values for all of the labels of a `MetricDescriptor`.", +"id": "GoogleApiMetric", +"properties": { +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "The set of label values that uniquely identify this metric. All labels listed in the `MetricDescriptor` must be assigned values.", +"type": "object" +}, +"type": { +"description": "An existing metric type, see google.api.MetricDescriptor. For example, `custom.googleapis.com/invoice/paid/amount`.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleApiMonitoredResource": { +"description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The `type` field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the `labels` field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for `\"gce_instance\"` has labels `\"project_id\"`, `\"instance_id\"` and `\"zone\"`: { \"type\": \"gce_instance\", \"labels\": { \"project_id\": \"my-project\", \"instance_id\": \"12345678901234\", \"zone\": \"us-central1-a\" }}", +"id": "GoogleApiMonitoredResource", +"properties": { +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. 
For example, Compute Engine VM instances use the labels `\"project_id\"`, `\"instance_id\"`, and `\"zone\"`.", +"type": "object" +}, +"type": { +"description": "Required. The monitored resource type. This field must match the `type` field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleApiMonitoredResourceMetadata": { +"description": "Auxiliary metadata for a MonitoredResource object. MonitoredResource objects contain the minimum set of information to uniquely identify a monitored resource instance. There is some other useful auxiliary metadata. Monitoring and Logging use an ingestion pipeline to extract metadata for cloud resources of all types, and store the metadata in this message.", +"id": "GoogleApiMonitoredResourceMetadata", +"properties": { +"systemLabels": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example: { \"name\": \"my-test-instance\", \"security_group\": [\"a\", \"b\", \"c\"], \"spot_instance\": false }", +"type": "object" +}, +"userLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "Output only. A map of user-defined metadata labels.", +"type": "object" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineLoggingErrorContext": { "description": "A description of the context in which an error occurred.", "id": "GoogleCloudDiscoveryengineLoggingErrorContext", @@ -9447,6 +9853,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries": { +"description": "The historical crawl rate timeseries data, used for monitoring.", +"id": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"properties": { +"qpsTimeSeries": { +"$ref": "GoogleMonitoringV3TimeSeries", +"description": "The QPS of the crawl rate." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaCreateDataStoreMetadata": { "description": "Metadata related to the progress of the DataStoreService.CreateDataStore operation. This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaCreateDataStoreMetadata", @@ -9779,6 +10196,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries": { +"description": "The historical dedicated crawl rate timeseries data, used for monitoring. Dedicated crawl is used by Vertex AI to crawl the user's website when dedicate crawl is set.", +"id": "GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries", +"properties": { +"autoRefreshCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's dedicated crawl rate time series of auto-refresh, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is set, and the crawl rate is for best effort use cases like refreshing urls periodically." 
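The crawl-rate monitoring payload defined by these schemas nests `GoogleMonitoringV3TimeSeries` data under the organic or dedicated time-series messages. A hedged sketch of walking such a response, assuming it has already been obtained as a plain dict (the `ObtainCrawlRate` call itself is not shown in this hunk):

```python
# Sketch only: `response` is assumed to be shaped like
# GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse from this patch.
def print_organic_qps(response: dict) -> None:
    if response.get("state") == "FAILED":
        print("ObtainCrawlRate failed:", response.get("error"))
        return
    organic = response.get("organicCrawlRateTimeSeries", {})
    for rate_name in ("googleOrganicCrawlRate", "vertexAiOrganicCrawlRate"):
        series = organic.get(rate_name, {}).get("qpsTimeSeries", {})
        for point in series.get("points", []):
            # Each point carries a GoogleMonitoringV3TimeInterval and a
            # GoogleMonitoringV3TypedValue; QPS is reported as a double.
            end_time = point.get("interval", {}).get("endTime")
            qps = point.get("value", {}).get("doubleValue")
            print(f"{rate_name} @ {end_time}: {qps} QPS")
```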
+}, +"userTriggeredCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's dedicated crawl rate time series of user triggered crawl, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is set, and user triggered crawl rate is for deterministic use cases like crawling urls or sitemaps specified by users." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaDeleteDataStoreMetadata": { "description": "Metadata related to the progress of the DataStoreService.DeleteDataStore operation. This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaDeleteDataStoreMetadata", @@ -10955,20 +11387,69 @@ "mode": { "description": "Mode of Natural Language Query Understanding. If this field is unset, the behavior defaults to NaturalLanguageQueryUnderstandingConfig.Mode.DISABLED.", "enum": [ -"MODE_UNSPECIFIED", -"DISABLED", -"ENABLED" +"MODE_UNSPECIFIED", +"DISABLED", +"ENABLED" +], +"enumDescriptions": [ +"Default value.", +"Natural Language Query Understanding is disabled.", +"Natural Language Query Understanding is enabled." +], +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse": { +"description": "Response message for CrawlRateManagementService.ObtainCrawlRate method. The response contains organcic or dedicated crawl rate time series data for monitoring, depending on whether dedicated crawl rate is set.", +"id": "GoogleCloudDiscoveryengineV1alphaObtainCrawlRateResponse", +"properties": { +"dedicatedCrawlRateTimeSeries": { +"$ref": "GoogleCloudDiscoveryengineV1alphaDedicatedCrawlRateTimeSeries", +"description": "The historical dedicated crawl rate timeseries data, used for monitoring." +}, +"error": { +"$ref": "GoogleRpcStatus", +"description": "Errors from service when handling the request." +}, +"organicCrawlRateTimeSeries": { +"$ref": "GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries", +"description": "The historical organic crawl rate timeseries data, used for monitoring." +}, +"state": { +"description": "Output only. The state of the response.", +"enum": [ +"STATE_UNSPECIFIED", +"SUCCEEDED", +"FAILED" ], "enumDescriptions": [ -"Default value.", -"Natural Language Query Understanding is disabled.", -"Natural Language Query Understanding is enabled." +"The state is unspecified.", +"The state is successful.", +"The state is failed." ], +"readOnly": true, "type": "string" } }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries": { +"description": "The historical organic crawl rate timeseries data, used for monitoring. Organic crawl is auto-determined by Google to crawl the user's website when dedicate crawl is not set. Crawl rate is the QPS of crawl request Google sends to the user's website.", +"id": "GoogleCloudDiscoveryengineV1alphaOrganicCrawlRateTimeSeries", +"properties": { +"googleOrganicCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Google's organic crawl rate time series, which is the sum of all googlebots' crawl rate. Please refer to https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers for more details about googlebots." +}, +"vertexAiOrganicCrawlRate": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCrawlRateTimeSeries", +"description": "Vertex AI's organic crawl rate time series, which is the crawl rate of Google-CloudVertexBot when dedicate crawl is not set. 
Please refer to https://developers.google.com/search/docs/crawling-indexing/google-common-crawlers#google-cloudvertexbot for more details about Google-CloudVertexBot." +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaProject": { "description": "Metadata and configurations for a Google Cloud project in the service.", "id": "GoogleCloudDiscoveryengineV1alphaProject", @@ -12529,6 +13010,236 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequest": { +"description": "Request message for CompletionService.AdvancedCompleteQuery method. .", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequest", +"properties": { +"boostSpec": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpec", +"description": "Optional. Specification to boost suggestions matching the condition." +}, +"includeTailSuggestions": { +"description": "Indicates if tail suggestions should be returned if there are no suggestions that match the full query. Even if set to true, if there are suggestions that match the full query, those are returned and no tail suggestions are returned.", +"type": "boolean" +}, +"query": { +"description": "Required. The typeahead input used to fetch suggestions. Maximum length is 128 characters. The query can not be empty for most of the suggestion types. If it is empty, an `INVALID_ARGUMENT` error is returned. The exception is when the suggestion_types contains only the type `RECENT_SEARCH`, the query can be an empty string. The is called \"zero prefix\" feature, which returns user's recently searched queries given the empty query.", +"type": "string" +}, +"queryModel": { +"description": "Specifies the autocomplete data model. This overrides any model specified in the Configuration > Autocomplete section of the Cloud console. Currently supported values: * `document` - Using suggestions generated from user-imported documents. * `search-history` - Using suggestions generated from the past history of SearchService.Search API calls. Do not use it when there is no traffic for Search API. * `user-event` - Using suggestions generated from user-imported search events. * `document-completable` - Using suggestions taken directly from user-imported document fields marked as completable. Default values: * `document` is the default model for regular dataStores. * `search-history` is the default model for site search dataStores.", +"type": "string" +}, +"suggestionTypes": { +"description": "Optional. Suggestion types to return. If empty or unspecified, query suggestions are returned. Only one suggestion type is supported at the moment.", +"items": { +"enum": [ +"SUGGESTION_TYPE_UNSPECIFIED", +"QUERY", +"PEOPLE", +"CONTENT", +"RECENT_SEARCH", +"GOOGLE_WORKSPACE" +], +"enumDescriptions": [ +"Default value.", +"Returns query suggestions.", +"Returns people suggestions.", +"Returns content suggestions.", +"Returns recent search suggestions.", +"Returns Google Workspace suggestions." +], +"type": "string" +}, +"type": "array" +}, +"userInfo": { +"$ref": "GoogleCloudDiscoveryengineV1betaUserInfo", +"description": "Optional. Information about the end user. This should be the same identifier information as UserEvent.user_info and SearchRequest.user_info." +}, +"userPseudoId": { +"description": "A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. 
This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and SearchRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpec": { +"description": "Specification to boost suggestions based on the condtion of the suggestion.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpec", +"properties": { +"conditionBoostSpecs": { +"description": "Condition boost specifications. If a suggestion matches multiple conditions in the specifictions, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpecConditionBoostSpec" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpecConditionBoostSpec": { +"description": "Boost applies to suggestions which match a condition.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpecConditionBoostSpec", +"properties": { +"boost": { +"description": "Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored.", +"format": "float", +"type": "number" +}, +"condition": { +"description": "An expression which specifies a boost condition. The syntax is the same as [filter expression syntax](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax). Currently, the only supported condition is a list of BCP-47 lang codes. Example: * To boost suggestions in languages `en` or `fr`: `(lang_code: ANY(\"en\", \"fr\"))`", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponse": { +"description": "Response message for CompletionService.AdvancedCompleteQuery method.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponse", +"properties": { +"contentSuggestions": { +"description": "Results of the matched content suggestions. The result list is ordered and the first result is the top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseContentSuggestion" +}, +"type": "array" +}, +"peopleSuggestions": { +"description": "Results of the matched people suggestions. The result list is ordered and the first result is the top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponsePersonSuggestion" +}, +"type": "array" +}, +"querySuggestions": { +"description": "Results of the matched query suggestions. 
The result list is ordered and the first result is a top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseQuerySuggestion" +}, +"type": "array" +}, +"recentSearchSuggestions": { +"description": "Results of the matched \"recent search\" suggestions. The result list is ordered and the first result is the top suggestion.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseRecentSearchSuggestion" +}, +"type": "array" +}, +"tailMatchTriggered": { +"description": "True if the returned suggestions are all tail suggestions. For tail matching to be triggered, include_tail_suggestions in the request must be true and there must be no suggestions that match the full query.", +"type": "boolean" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseContentSuggestion": { +"description": "Suggestions as content.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseContentSuggestion", +"properties": { +"contentType": { +"description": "The type of the content suggestion.", +"enum": [ +"CONTENT_TYPE_UNSPECIFIED", +"GOOGLE_WORKSPACE", +"THIRD_PARTY" +], +"enumDescriptions": [ +"Default value.", +"The suggestion is from a Google Workspace source.", +"The suggestion is from a third party source." +], +"type": "string" +}, +"dataStore": { +"description": "The name of the dataStore that this suggestion belongs to.", +"type": "string" +}, +"document": { +"$ref": "GoogleCloudDiscoveryengineV1betaDocument", +"description": "The document data snippet in the suggestion. Only a subset of fields will be populated." +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponsePersonSuggestion": { +"description": "Suggestions as people.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponsePersonSuggestion", +"properties": { +"dataStore": { +"description": "The name of the dataStore that this suggestion belongs to.", +"type": "string" +}, +"document": { +"$ref": "GoogleCloudDiscoveryengineV1betaDocument", +"description": "The document data snippet in the suggestion. Only a subset of fields is populated." +}, +"personType": { +"description": "The type of the person.", +"enum": [ +"PERSON_TYPE_UNSPECIFIED", +"CLOUD_IDENTITY", +"THIRD_PARTY_IDENTITY" +], +"enumDescriptions": [ +"Default value.", +"The suggestion is from a GOOGLE_IDENTITY source.", +"The suggestion is from a THIRD_PARTY_IDENTITY source." +], +"type": "string" +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseQuerySuggestion": { +"description": "Suggestions as search queries.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseQuerySuggestion", +"properties": { +"completableFieldPaths": { +"description": "The unique document field paths that serve as the source of this suggestion if it was generated from completable fields. 
This field is only populated for the document-completable model.", +"items": { +"type": "string" +}, +"type": "array" +}, +"dataStore": { +"description": "The name of the dataStore that this suggestion belongs to.", +"items": { +"type": "string" +}, +"type": "array" +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseRecentSearchSuggestion": { +"description": "Suggestions from recent search history.", +"id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryResponseRecentSearchSuggestion", +"properties": { +"recentSearchTime": { +"description": "The time when this recent rearch happened.", +"format": "google-datetime", +"type": "string" +}, +"suggestion": { +"description": "The suggestion for the query.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaAdvancedSiteSearchConfig": { "description": "Configuration data for advance site search.", "id": "GoogleCloudDiscoveryengineV1betaAdvancedSiteSearchConfig", @@ -12720,6 +13431,10 @@ "description": "Deprecated: This field is deprecated. Streaming Answer API will be supported. Asynchronous mode control. If enabled, the response will be returned with answer/session resource name without final answer. The API users need to do the polling to get the latest status of answer/session by calling ConversationalSearchService.GetAnswer or ConversationalSearchService.GetSession method.", "type": "boolean" }, +"groundingSpec": { +"$ref": "GoogleCloudDiscoveryengineV1betaAnswerQueryRequestGroundingSpec", +"description": "Optional. Grounding specification." +}, "query": { "$ref": "GoogleCloudDiscoveryengineV1betaQuery", "description": "Required. Current user query." @@ -12819,6 +13534,31 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaAnswerQueryRequestGroundingSpec": { +"description": "Grounding specification.", +"id": "GoogleCloudDiscoveryengineV1betaAnswerQueryRequestGroundingSpec", +"properties": { +"filteringLevel": { +"description": "Optional. Specifies whether to enable the filtering based on grounding score and at what level.", +"enum": [ +"FILTERING_LEVEL_UNSPECIFIED", +"FILTERING_LEVEL_LOW", +"FILTERING_LEVEL_HIGH" +], +"enumDescriptions": [ +"Default is no filter", +"Filter answers based on a low threshold.", +"Filter answers based on a high threshold." +], +"type": "string" +}, +"includeGroundingSupports": { +"description": "Optional. Specifies whether to include grounding_supports in the answer. The default value is `false`. When this field is set to `true`, returned answer will have `grounding_score` and will contain GroundingSupports for each claim.", +"type": "boolean" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaAnswerQueryRequestQueryUnderstandingSpec": { "description": "Query understanding specification.", "id": "GoogleCloudDiscoveryengineV1betaAnswerQueryRequestQueryUnderstandingSpec", @@ -19239,7 +19979,7 @@ "type": "string" }, "eventType": { -"description": "Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. 
in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.", +"description": "Required. User event type. Allowed values are: Generic values: * `search`: Search for Documents. * `view-item`: Detailed page view of a Document. * `view-item-list`: View of a panel or ordered list of Documents. * `view-home-page`: View of the home page. * `view-category-page`: View of a category page, e.g. Home > Men > Jeans * `add-feedback`: Add a user feedback. Retail-related values: * `add-to-cart`: Add an item(s) to cart, e.g. in Retail online shopping * `purchase`: Purchase an item(s) Media-related values: * `media-play`: Start/resume watching a video, playing a song, etc. * `media-complete`: Finished or stopped midway through a video, song, etc.", "type": "string" }, "filter": { @@ -19412,6 +20152,139 @@ }, "type": "object" }, +"GoogleMonitoringV3Point": { +"description": "A single data point in a time series.", +"id": "GoogleMonitoringV3Point", +"properties": { +"interval": { +"$ref": "GoogleMonitoringV3TimeInterval", +"description": "The time interval to which the data point applies. For `GAUGE` metrics, the start time is optional, but if it is supplied, it must equal the end time. For `DELTA` metrics, the start and end time should specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For `CUMULATIVE` metrics, the start and end time should specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." +}, +"value": { +"$ref": "GoogleMonitoringV3TypedValue", +"description": "The value of the data point." +} +}, +"type": "object" +}, +"GoogleMonitoringV3TimeInterval": { +"description": "A time interval extending just after a start time through an end time. If the start time is the same as the end time, then the interval represents a single point in time.", +"id": "GoogleMonitoringV3TimeInterval", +"properties": { +"endTime": { +"description": "Required. The end of the time interval.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"GoogleMonitoringV3TimeSeries": { +"description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", +"id": "GoogleMonitoringV3TimeSeries", +"properties": { +"description": { +"description": "Input only. A detailed description of the time series that will be associated with the google.api.MetricDescriptor for the metric. Once set, this field cannot be changed through CreateTimeSeries.", +"type": "string" +}, +"metadata": { +"$ref": "GoogleApiMonitoredResourceMetadata", +"description": "Output only. The associated monitored resource metadata. When reading a time series, this field will include metadata labels that are explicitly named in the reduction. When creating a time series, this field is ignored." 
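The new `groundingSpec` field on `AnswerQueryRequest` (added earlier in this file) is a plain nested message, so it can be attached to an existing answer-query body. A hedged example body, assuming the established answer method is used to send it and that the `Query` message's `text` field carries the user question; the question itself is a placeholder:

```python
# Sketch only: field names follow the AnswerQueryRequest / GroundingSpec
# schemas in this patch; the query text is a placeholder.
answer_body = {
    "query": {"text": "How do I enable dedicated crawl rate?"},
    "groundingSpec": {
        # Drop answer candidates whose grounding score falls below a low bar.
        "filteringLevel": "FILTERING_LEVEL_LOW",
        # Also return grounding_score and GroundingSupports for each claim.
        "includeGroundingSupports": True,
    },
}
```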
+}, +"metric": { +"$ref": "GoogleApiMetric", +"description": "The associated metric. A fully-specified metric used to identify the time series." +}, +"metricKind": { +"description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series. When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either `GAUGE` (the default) or `CUMULATIVE`.", +"enum": [ +"METRIC_KIND_UNSPECIFIED", +"GAUGE", +"DELTA", +"CUMULATIVE" +], +"enumDescriptions": [ +"Do not use this default value.", +"An instantaneous measurement of a value.", +"The change in a value during a time interval.", +"A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." +], +"type": "string" +}, +"points": { +"description": "The data points of this time series. When listing time series, points are returned in reverse time order. When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.", +"items": { +"$ref": "GoogleMonitoringV3Point" +}, +"type": "array" +}, +"resource": { +"$ref": "GoogleApiMonitoredResource", +"description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data. For more information, see [Monitored resources for custom metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources)." +}, +"unit": { +"description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. This field can only be changed through CreateTimeSeries when it is empty.", +"type": "string" +}, +"valueType": { +"description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series. When creating a time series, this field is optional. If present, it must be the same as the type of the data in the `points` field.", +"enum": [ +"VALUE_TYPE_UNSPECIFIED", +"BOOL", +"INT64", +"DOUBLE", +"STRING", +"DISTRIBUTION", +"MONEY" +], +"enumDescriptions": [ +"Do not use this default value.", +"The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", +"The value is a signed 64-bit integer.", +"The value is a double precision floating point number.", +"The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", +"The value is a `Distribution`.", +"The value is money." 
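The `completionConfig.completeQuery` methods and the `AdvancedCompleteQueryRequest`/`Response` schemas above combine as follows. A minimal sketch against the v1beta client, with placeholder resource names and with credentials assumed to be configured separately:

```python
# Sketch only: resource names and the typeahead input are placeholders;
# authentication setup (Application Default Credentials) is omitted.
from googleapiclient.discovery import build

client = build("discoveryengine", "v1beta")

completion_config = (
    "projects/PROJECT/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE/completionConfig"
)

body = {
    "query": "runn",             # typeahead input, at most 128 characters
    "queryModel": "document",    # default model for regular dataStores
    "suggestionTypes": ["QUERY"],  # only one suggestion type per request
}

response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .completionConfig()
    .completeQuery(completionConfig=completion_config, body=body)
    .execute()
)

for suggestion in response.get("querySuggestions", []):
    print(suggestion.get("suggestion"))
```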
+], +"type": "string" +} +}, +"type": "object" +}, +"GoogleMonitoringV3TypedValue": { +"description": "A single strongly-typed value.", +"id": "GoogleMonitoringV3TypedValue", +"properties": { +"boolValue": { +"description": "A Boolean value: `true` or `false`.", +"type": "boolean" +}, +"distributionValue": { +"$ref": "GoogleApiDistribution", +"description": "A distribution value." +}, +"doubleValue": { +"description": "A 64-bit double-precision floating-point number. Its magnitude is approximately \u00b110\u00b1300 and it has 16 significant digits of precision.", +"format": "double", +"type": "number" +}, +"int64Value": { +"description": "A 64-bit integer. Its range is approximately \u00b19.2x1018.", +"format": "int64", +"type": "string" +}, +"stringValue": { +"description": "A variable-length string value.", +"type": "string" +} +}, +"type": "object" +}, "GoogleProtobufEmpty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "GoogleProtobufEmpty", From 832401daefb81e7aa966179744d1f3ff453281ce Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 08/18] feat(firebaseappdistribution): update the api #### firebaseappdistribution:v1alpha The following keys were added: - schemas.GoogleFirebaseAppdistroV1alphaReleaseTest.properties.displayName.type (Total Keys: 1) --- ...ppdistribution_v1alpha.projects.apps.releases.tests.html | 4 ++++ .../documents/firebaseappdistribution.v1alpha.json | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html index f53c2c540f..87b719a610 100644 --- a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html +++ b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html @@ -229,6 +229,7 @@

<h3>Method Details</h3>
"videoUri": "A String", # Output only. A URI to a video of the test run. }, ], + "displayName": "A String", # Optional. Display name of the release test. Required if the release test is created with multiple goals "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test. Input only. "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html @@ -355,6 +356,7 @@

Method Details

"videoUri": "A String", # Output only. A URI to a video of the test run. }, ], + "displayName": "A String", # Optional. Display name of the release test. Required if the release test is created with multiple goals "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test. Input only. "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html @@ -487,6 +489,7 @@

Method Details

"videoUri": "A String", # Output only. A URI to a video of the test run. }, ], + "displayName": "A String", # Optional. Display name of the release test. Required if the release test is created with multiple goals "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test. Input only. "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html @@ -629,6 +632,7 @@

Method Details

"videoUri": "A String", # Output only. A URI to a video of the test run. }, ], + "displayName": "A String", # Optional. Display name of the release test. Required if the release test is created with multiple goals "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test. Input only. "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html diff --git a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json index 9a9f23474f..86efbdec56 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json @@ -625,7 +625,7 @@ } } }, -"revision": "20241018", +"revision": "20241028", "rootUrl": "https://firebaseappdistribution.googleapis.com/", "schemas": { "AndroidxCrawlerOutputPoint": { @@ -1488,6 +1488,10 @@ }, "type": "array" }, +"displayName": { +"description": "Optional. Display name of the release test. Required if the release test is created with multiple goals", +"type": "string" +}, "loginCredential": { "$ref": "GoogleFirebaseAppdistroV1alphaLoginCredential", "description": "Optional. Input only. Login credentials for the test. Input only." From d29235d7733ffd9b7b16b5232cf785a2c668a5ca Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:47 +0000 Subject: [PATCH 09/18] feat(iamcredentials): update the api #### iamcredentials:v1 The following keys were added: - resources.projects.resources.serviceAccounts.methods.getAllowedLocations (Total Keys: 10) - schemas.ServiceAccountAllowedLocations (Total Keys: 7) --- ...edentials_v1.projects.serviceAccounts.html | 25 +++++++++++ .../documents/iamcredentials.v1.json | 44 ++++++++++++++++++- 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/docs/dyn/iamcredentials_v1.projects.serviceAccounts.html b/docs/dyn/iamcredentials_v1.projects.serviceAccounts.html index 19d849f773..4ef6acb365 100644 --- a/docs/dyn/iamcredentials_v1.projects.serviceAccounts.html +++ b/docs/dyn/iamcredentials_v1.projects.serviceAccounts.html @@ -83,6 +83,9 @@

Instance Methods

generateIdToken(name, body=None, x__xgafv=None)

Generates an OpenID Connect ID token for a service account.

+

+ getAllowedLocations(name, x__xgafv=None)

+

Returns the trust boundary info for a given service account.

signBlob(name, body=None, x__xgafv=None)

Signs a blob using a service account's system-managed private key.

@@ -158,6 +161,28 @@

Method Details

}
+
+ getAllowedLocations(name, x__xgafv=None) +
Returns the trust boundary info for a given service account.
+
+Args:
+  name: string, Required. Resource name of service account. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a list of allowed locations for given service account.
+  "encodedLocations": "A String", # Output only. The hex encoded bitmap of the trust boundary locations
+  "locations": [ # Output only. The human readable trust boundary locations. For example, ["us-central1", "europe-west1"]
+    "A String",
+  ],
+}
+
+
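A minimal usage sketch for the new getAllowedLocations method, following the library's usual discovery-based calling pattern; it assumes application-default credentials and uses a hypothetical service account name.

from googleapiclient.discovery import build

# Build the IAM Credentials client from its discovery document.
iamcredentials = build("iamcredentials", "v1")

# Hypothetical resource name; must match projects/*/serviceAccounts/*.
name = "projects/-/serviceAccounts/example-sa@example-project.iam.gserviceaccount.com"

response = (
    iamcredentials.projects()
    .serviceAccounts()
    .getAllowedLocations(name=name)
    .execute()
)

# Output-only fields from ServiceAccountAllowedLocations.
print(response.get("encodedLocations"))
for location in response.get("locations", []):
    print(location)
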
signBlob(name, body=None, x__xgafv=None)
Signs a blob using a service account's system-managed private key.
diff --git a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json
index b62e532398..cb401fd524 100644
--- a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json
+++ b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json
@@ -165,6 +165,28 @@
 "https://www.googleapis.com/auth/cloud-platform"
 ]
 },
+"getAllowedLocations": {
+"description": "Returns the trust boundary info for a given service account.",
+"flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/allowedLocations",
+"httpMethod": "GET",
+"id": "iamcredentials.projects.serviceAccounts.getAllowedLocations",
+"parameterOrder": [
+"name"
+],
+"parameters": {
+"name": {
+"description": "Required. Resource name of service account.",
+"location": "path",
+"pattern": "^projects/[^/]+/serviceAccounts/[^/]+$",
+"required": true,
+"type": "string"
+}
+},
+"path": "v1/{+name}/allowedLocations",
+"response": {
+"$ref": "ServiceAccountAllowedLocations"
+}
+},
 "signBlob": {
 "description": "Signs a blob using a service account's system-managed private key.",
 "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob",
@@ -226,7 +248,7 @@
 }
 }
 },
-"revision": "20240624",
+"revision": "20241024",
 "rootUrl": "https://iamcredentials.googleapis.com/",
 "schemas": {
 "GenerateAccessTokenRequest": {
@@ -300,6 +322,26 @@
 },
 "type": "object"
 },
+"ServiceAccountAllowedLocations": {
+"description": "Represents a list of allowed locations for given service account.",
+"id": "ServiceAccountAllowedLocations",
+"properties": {
+"encodedLocations": {
+"description": "Output only. The hex encoded bitmap of the trust boundary locations",
+"readOnly": true,
+"type": "string"
+},
+"locations": {
+"description": "Output only. The human readable trust boundary locations. For example, [\"us-central1\", \"europe-west1\"]",
+"items": {
+"type": "string"
+},
+"readOnly": true,
+"type": "array"
+}
+},
+"type": "object"
+},
 "SignBlobRequest": {
 "id": "SignBlobRequest",
 "properties": {

From d7f6f8b70361d19d02afcd9eba3d873402bafdb7 Mon Sep 17 00:00:00 2001
From: Yoshi Automation 
Date: Wed, 30 Oct 2024 20:33:48 +0000
Subject: [PATCH 10/18] feat(identitytoolkit): update the api

#### identitytoolkit:v2

The following keys were added:
- schemas.GoogleCloudIdentitytoolkitAdminV2RecaptchaConfig.properties.useSmsBotScore.type (Total Keys: 1)
- schemas.GoogleCloudIdentitytoolkitAdminV2RecaptchaConfig.properties.useSmsTollFraudProtection.type (Total Keys: 1)
- schemas.GoogleCloudIdentitytoolkitV2RecaptchaConfig.properties.useSmsBotScore.type (Total Keys: 1)
- schemas.GoogleCloudIdentitytoolkitV2RecaptchaConfig.properties.useSmsTollFraudProtection.type (Total Keys: 1)
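
A minimal sketch of setting the two new booleans alongside the existing reCAPTCHA phone config. It assumes the project-level config is written through the admin API's existing projects.updateConfig method and application-default credentials; the project name, update mask, and the BLOCK action value are illustrative assumptions, not taken from this patch.

from googleapiclient.discovery import build

# Identity Platform admin surface.
idtoolkit = build("identitytoolkit", "v2")

recaptcha_config = {
    # Both new flags may only be true when the phone enforcement state
    # is AUDIT or ENFORCE.
    "phoneEnforcementState": "AUDIT",
    "useSmsBotScore": True,
    "useSmsTollFraudProtection": True,
    # Toll fraud managed rules only take effect when
    # use_sms_toll_fraud_protection is true; the action value is illustrative.
    "tollFraudManagedRules": [
        {"startScore": 0.5, "action": "BLOCK"},
    ],
}

# Hypothetical project; assumes the existing projects.updateConfig method.
idtoolkit.projects().updateConfig(
    name="projects/example-project/config",
    updateMask="recaptchaConfig",
    body={"recaptchaConfig": recaptcha_config},
).execute()
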
---
 docs/dyn/identitytoolkit_v1.accounts.html     | 12 ++--
 ...titytoolkit_v2.accounts.mfaEnrollment.html |  2 +-
 ...identitytoolkit_v2.accounts.mfaSignIn.html |  2 +-
 docs/dyn/identitytoolkit_v2.projects.html     | 36 ++++++----
 .../identitytoolkit_v2.projects.tenants.html  | 72 +++++++++++--------
 docs/dyn/identitytoolkit_v2.v2.html           |  2 +
 .../documents/identitytoolkit.v1.json         | 14 ++--
 .../documents/identitytoolkit.v2.json         | 30 ++++++--
 8 files changed, 103 insertions(+), 67 deletions(-)

diff --git a/docs/dyn/identitytoolkit_v1.accounts.html b/docs/dyn/identitytoolkit_v1.accounts.html
index b70e5ba0d9..7e57f540e8 100644
--- a/docs/dyn/identitytoolkit_v1.accounts.html
+++ b/docs/dyn/identitytoolkit_v1.accounts.html
@@ -445,19 +445,19 @@ 

Method Details

body: object, The request body. The object takes the form of: -{ # Request message for SendVerificationCode. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. +{ # Request message for SendVerificationCode. 'captcha_response' is required when reCAPTCHA enterprise is enabled, or otherwise at least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. "autoRetrievalInfo": { # The information required to auto-retrieve an SMS. # Android only. Used by Google Play Services to identify the app for auto-retrieval. "appSignatureHash": "A String", # The Android app's signature hash for Google Play Service's SMS Retriever API. }, "captchaResponse": "A String", # Optional. The reCAPTCHA Enterprise token provided by the reCAPTCHA client-side integration. Required when reCAPTCHA enterprise is enabled. "clientType": "A String", # Optional. The client type, web, android or ios. Required when reCAPTCHA Enterprise is enabled. - "iosReceipt": "A String", # Receipt of successful iOS app token validation. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. This should come from the response of verifyIosClient. If present, the caller should also provide the `ios_secret`, as well as a bundle ID in the `x-ios-bundle-identifier` header, which must match the bundle ID from the verifyIosClient request. + "iosReceipt": "A String", # Receipt of successful iOS app token validation. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). This should come from the response of verifyIosClient. If present, the caller should also provide the `ios_secret`, as well as a bundle ID in the `x-ios-bundle-identifier` header, which must match the bundle ID from the verifyIosClient request. "iosSecret": "A String", # Secret delivered to iOS app as a push notification. Should be passed with an `ios_receipt` as well as the `x-ios-bundle-identifier` header. "phoneNumber": "A String", # The phone number to send the verification code to in E.164 format. - "playIntegrityToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token (and safety_net_token). At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, , or `play_integrity_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. A Play Integrity Token can be generated via the [PlayIntegrity API](https://developer.android.com/google/play/integrity) with applying SHA256 to the `phone_number` field as the nonce. - "recaptchaToken": "A String", # Recaptcha token for app verification. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. The recaptcha should be generated by calling getRecaptchaParams and the recaptcha token will be generated on user completion of the recaptcha challenge. 
- "recaptchaVersion": "A String", # Optional. The reCAPTCHA version of the reCAPTCHA token in the captcha_response. - "safetyNetToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. A SafetyNet Token can be generated via the [SafetyNet Android Attestation API](https://developer.android.com/training/safetynet/attestation.html), with the Base64 encoding of the `phone_number` field as the nonce. + "playIntegrityToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token (and safety_net_token). At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, , or `play_integrity_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). A Play Integrity Token can be generated via the [PlayIntegrity API](https://developer.android.com/google/play/integrity) with applying SHA256 to the `phone_number` field as the nonce. + "recaptchaToken": "A String", # Recaptcha token for app verification. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). The recaptcha should be generated by calling getRecaptchaParams and the recaptcha token will be generated on user completion of the recaptcha challenge. + "recaptchaVersion": "A String", # Optional. The reCAPTCHA version of the reCAPTCHA token in the captcha_response. Required when reCAPTCHA Enterprise is enabled. + "safetyNetToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). A SafetyNet Token can be generated via the [SafetyNet Android Attestation API](https://developer.android.com/training/safetynet/attestation.html), with the Base64 encoding of the `phone_number` field as the nonce. "tenantId": "A String", # Tenant ID of the Identity Platform tenant the user is signing in to. } diff --git a/docs/dyn/identitytoolkit_v2.accounts.mfaEnrollment.html b/docs/dyn/identitytoolkit_v2.accounts.mfaEnrollment.html index cfebe4ba56..4569aa2206 100644 --- a/docs/dyn/identitytoolkit_v2.accounts.mfaEnrollment.html +++ b/docs/dyn/identitytoolkit_v2.accounts.mfaEnrollment.html @@ -158,7 +158,7 @@

Method Details

"phoneNumber": "A String", # Required for enrollment. Phone number to be enrolled as MFA. "playIntegrityToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token (or safety net token). A Play Integrity Token can be generated via the [PlayIntegrity API] (https://developer.android.com/google/play/integrity) with applying SHA256 to the `phone_number` field as the nonce. "recaptchaToken": "A String", # Web only. Recaptcha solution. - "recaptchaVersion": "A String", # The reCAPTCHA version of the reCAPTCHA token in the captcha_response. + "recaptchaVersion": "A String", # The reCAPTCHA version of the reCAPTCHA token in the captcha_response. Required when reCAPTCHA Enterprise is enabled. "safetyNetToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token. A SafetyNet Token can be generated via the [SafetyNet Android Attestation API](https://developer.android.com/training/safetynet/attestation.html), with the Base64 encoding of the `phone_number` field as the nonce. }, "tenantId": "A String", # The ID of the Identity Platform tenant that the user enrolling MFA belongs to. If not set, the user belongs to the default Identity Platform project. diff --git a/docs/dyn/identitytoolkit_v2.accounts.mfaSignIn.html b/docs/dyn/identitytoolkit_v2.accounts.mfaSignIn.html index 7c30d866e6..2b1ac70012 100644 --- a/docs/dyn/identitytoolkit_v2.accounts.mfaSignIn.html +++ b/docs/dyn/identitytoolkit_v2.accounts.mfaSignIn.html @@ -153,7 +153,7 @@

Method Details

"phoneNumber": "A String", # Required for enrollment. Phone number to be enrolled as MFA. "playIntegrityToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token (or safety net token). A Play Integrity Token can be generated via the [PlayIntegrity API] (https://developer.android.com/google/play/integrity) with applying SHA256 to the `phone_number` field as the nonce. "recaptchaToken": "A String", # Web only. Recaptcha solution. - "recaptchaVersion": "A String", # The reCAPTCHA version of the reCAPTCHA token in the captcha_response. + "recaptchaVersion": "A String", # The reCAPTCHA version of the reCAPTCHA token in the captcha_response. Required when reCAPTCHA Enterprise is enabled. "safetyNetToken": "A String", # Android only. Used to assert application identity in place of a recaptcha token. A SafetyNet Token can be generated via the [SafetyNet Android Attestation API](https://developer.android.com/training/safetynet/attestation.html), with the Base64 encoding of the `phone_number` field as the nonce. }, "tenantId": "A String", # The ID of the Identity Platform tenant the user is signing in to. If not set, the user will sign in to the default Identity Platform project. diff --git a/docs/dyn/identitytoolkit_v2.projects.html b/docs/dyn/identitytoolkit_v2.projects.html index 00fe6530c1..1763a65aeb 100644 --- a/docs/dyn/identitytoolkit_v2.projects.html +++ b/docs/dyn/identitytoolkit_v2.projects.html @@ -280,27 +280,29 @@

Method Details

}, }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The project-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "signIn": { # Configuration related to local sign in methods. # Configuration related to local sign in methods. "allowDuplicateEmails": True or False, # Whether to allow more than one account to have the same email. @@ -502,27 +504,29 @@

Method Details

}, }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The project-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "signIn": { # Configuration related to local sign in methods. # Configuration related to local sign in methods. "allowDuplicateEmails": True or False, # Whether to allow more than one account to have the same email. @@ -723,27 +727,29 @@

Method Details

}, }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The project-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "signIn": { # Configuration related to local sign in methods. # Configuration related to local sign in methods. "allowDuplicateEmails": True or False, # Whether to allow more than one account to have the same email. diff --git a/docs/dyn/identitytoolkit_v2.projects.tenants.html b/docs/dyn/identitytoolkit_v2.projects.tenants.html index 1fd0bc408d..3872d556b0 100644 --- a/docs/dyn/identitytoolkit_v2.projects.tenants.html +++ b/docs/dyn/identitytoolkit_v2.projects.tenants.html @@ -199,27 +199,29 @@

Method Details

], }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The tenant-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "smsRegionConfig": { # Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. # Configures which regions are enabled for SMS verification code sending. "allowByDefault": { # Defines a policy of allowing every region by default and adding disallowed regions to a disallow list. # A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. @@ -311,27 +313,29 @@

Method Details

], }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The tenant-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "smsRegionConfig": { # Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. # Configures which regions are enabled for SMS verification code sending. "allowByDefault": { # Defines a policy of allowing every region by default and adding disallowed regions to a disallow list. # A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. @@ -448,27 +452,29 @@

Method Details

], }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The tenant-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "smsRegionConfig": { # Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. # Configures which regions are enabled for SMS verification code sending. "allowByDefault": { # Defines a policy of allowing every region by default and adding disallowed regions to a disallow list. # A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. @@ -628,27 +634,29 @@

Method Details

], }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The tenant-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "smsRegionConfig": { # Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. # Configures which regions are enabled for SMS verification code sending. "allowByDefault": { # Defines a policy of allowing every region by default and adding disallowed regions to a disallow list. # A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. @@ -758,27 +766,29 @@

Method Details

], }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The tenant-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "smsRegionConfig": { # Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. # Configures which regions are enabled for SMS verification code sending. "allowByDefault": { # Defines a policy of allowing every region by default and adding disallowed regions to a disallow list. # A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. @@ -871,27 +881,29 @@

Method Details

], }, "recaptchaConfig": { # The reCAPTCHA Enterprise integration config. # The tenant-level reCAPTCHA config. - "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all related user flows protected by reCAPTCHA. + "emailPasswordEnforcementState": "A String", # The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA. "managedRules": [ # The managed rules for authentication action based on reCAPTCHA scores. The rules are shared across providers for a given tenant project. { # The config for a reCAPTCHA managed rule. Models a single interval [start_score, end_score]. The start_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. "endScore": 3.14, # The end score (inclusive) of the score range for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the riskiest request (likely a bot), whereas 1.0 indicates the safest request (likely a human). See https://cloud.google.com/recaptcha-enterprise/docs/interpret-assessment. }, ], - "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA. + "phoneEnforcementState": "A String", # The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA. "recaptchaKeys": [ # The reCAPTCHA keys. { # The reCAPTCHA key config. reCAPTCHA Enterprise offers different keys for different client platforms. "key": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}" "type": "A String", # The client's platform type. }, ], - "tollFraudManagedRules": [ # The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows. - { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0. + "tollFraudManagedRules": [ # The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true. + { # The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping. "action": "A String", # The action taken if the reCAPTCHA score of a request is within the interval [start_score, end_score]. - "startScore": 3.14, # The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. + "startScore": 3.14, # The start score (inclusive) for an action. 
Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms. }, ], "useAccountDefender": True or False, # Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE. }, "smsRegionConfig": { # Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. # Configures which regions are enabled for SMS verification code sending. "allowByDefault": { # Defines a policy of allowing every region by default and adding disallowed regions to a disallow list. # A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. diff --git a/docs/dyn/identitytoolkit_v2.v2.html b/docs/dyn/identitytoolkit_v2.v2.html index 637490901d..63e2af8803 100644 --- a/docs/dyn/identitytoolkit_v2.v2.html +++ b/docs/dyn/identitytoolkit_v2.v2.html @@ -153,6 +153,8 @@

Method Details

}, ], "recaptchaKey": "A String", # The reCAPTCHA Enterprise key resource name, e.g. "projects/{project}/keys/{key}". This will only be returned when the reCAPTCHA enforcement state is AUDIT or ENFORCE on at least one of the reCAPTCHA providers. + "useSmsBotScore": True or False, # Whether to use the rCE bot score for reCAPTCHA phone provider. + "useSmsTollFraudProtection": True or False, # Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. }
diff --git a/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json b/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json index d0acc1f6d8..a621144021 100644 --- a/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json +++ b/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json @@ -1239,7 +1239,7 @@ } } }, -"revision": "20241017", +"revision": "20241024", "rootUrl": "https://identitytoolkit.googleapis.com/", "schemas": { "GoogleCloudIdentitytoolkitV1Argon2Parameters": { @@ -2441,7 +2441,7 @@ true "type": "object" }, "GoogleCloudIdentitytoolkitV1SendVerificationCodeRequest": { -"description": "Request message for SendVerificationCode. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator.", +"description": "Request message for SendVerificationCode. 'captcha_response' is required when reCAPTCHA enterprise is enabled, or otherwise at least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator.", "id": "GoogleCloudIdentitytoolkitV1SendVerificationCodeRequest", "properties": { "autoRetrievalInfo": { @@ -2469,7 +2469,7 @@ true "type": "string" }, "iosReceipt": { -"description": "Receipt of successful iOS app token validation. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. This should come from the response of verifyIosClient. If present, the caller should also provide the `ios_secret`, as well as a bundle ID in the `x-ios-bundle-identifier` header, which must match the bundle ID from the verifyIosClient request.", +"description": "Receipt of successful iOS app token validation. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). This should come from the response of verifyIosClient. If present, the caller should also provide the `ios_secret`, as well as a bundle ID in the `x-ios-bundle-identifier` header, which must match the bundle ID from the verifyIosClient request.", "type": "string" }, "iosSecret": { @@ -2481,15 +2481,15 @@ true "type": "string" }, "playIntegrityToken": { -"description": "Android only. Used to assert application identity in place of a recaptcha token (and safety_net_token). At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, , or `play_integrity_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. A Play Integrity Token can be generated via the [PlayIntegrity API](https://developer.android.com/google/play/integrity) with applying SHA256 to the `phone_number` field as the nonce.", +"description": "Android only. Used to assert application identity in place of a recaptcha token (and safety_net_token). At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, , or `play_integrity_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). 
A Play Integrity Token can be generated via the [PlayIntegrity API](https://developer.android.com/google/play/integrity) with applying SHA256 to the `phone_number` field as the nonce.", "type": "string" }, "recaptchaToken": { -"description": "Recaptcha token for app verification. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. The recaptcha should be generated by calling getRecaptchaParams and the recaptcha token will be generated on user completion of the recaptcha challenge.", +"description": "Recaptcha token for app verification. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). The recaptcha should be generated by calling getRecaptchaParams and the recaptcha token will be generated on user completion of the recaptcha challenge.", "type": "string" }, "recaptchaVersion": { -"description": "Optional. The reCAPTCHA version of the reCAPTCHA token in the captcha_response.", +"description": "Optional. The reCAPTCHA version of the reCAPTCHA token in the captcha_response. Required when reCAPTCHA Enterprise is enabled.", "enum": [ "RECAPTCHA_VERSION_UNSPECIFIED", "RECAPTCHA_ENTERPRISE" @@ -2501,7 +2501,7 @@ true "type": "string" }, "safetyNetToken": { -"description": "Android only. Used to assert application identity in place of a recaptcha token. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator. A SafetyNet Token can be generated via the [SafetyNet Android Attestation API](https://developer.android.com/training/safetynet/attestation.html), with the Base64 encoding of the `phone_number` field as the nonce.", +"description": "Android only. Used to assert application identity in place of a recaptcha token. At least one of (`ios_receipt` and `ios_secret`), `recaptcha_token`, or `safety_net_token` must be specified to verify the verification code is being sent on behalf of a real app and not an emulator, if 'captcha_response' is not used (reCAPTCHA enterprise is not enabled). A SafetyNet Token can be generated via the [SafetyNet Android Attestation API](https://developer.android.com/training/safetynet/attestation.html), with the Base64 encoding of the `phone_number` field as the nonce.", "type": "string" }, "tenantId": { diff --git a/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json b/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json index 5ddba9045d..1acc4d1fa9 100644 --- a/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json +++ b/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json @@ -1655,7 +1655,7 @@ } } }, -"revision": "20241017", +"revision": "20241024", "rootUrl": "https://identitytoolkit.googleapis.com/", "schemas": { "GoogleCloudIdentitytoolkitAdminV2AllowByDefault": { @@ -2616,7 +2616,7 @@ "id": "GoogleCloudIdentitytoolkitAdminV2RecaptchaConfig", "properties": { "emailPasswordEnforcementState": { -"description": "The reCAPTCHA config for email/password provider, containing the enforcement status. 
The email/password provider contains all related user flows protected by reCAPTCHA.", +"description": "The reCAPTCHA config for email/password provider, containing the enforcement status. The email/password provider contains all email related user flows protected by reCAPTCHA.", "enum": [ "RECAPTCHA_PROVIDER_ENFORCEMENT_STATE_UNSPECIFIED", "OFF", @@ -2639,7 +2639,7 @@ "type": "array" }, "phoneEnforcementState": { -"description": "The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all related user flows protected by reCAPTCHA.", +"description": "The reCAPTCHA config for phone provider, containing the enforcement status. The phone provider contains all SMS related user flows protected by reCAPTCHA.", "enum": [ "RECAPTCHA_PROVIDER_ENFORCEMENT_STATE_UNSPECIFIED", "OFF", @@ -2662,7 +2662,7 @@ "type": "array" }, "tollFraudManagedRules": { -"description": "The managed rules for toll fraud provider, containing the enforcement status. The toll fraud provider contains all SMS related user flows.", +"description": "The managed rules for the authentication action based on reCAPTCHA toll fraud risk scores. Toll fraud managed rules will only take effect when the phone_enforcement_state is AUDIT or ENFORCE and use_sms_toll_fraud_protection is true.", "items": { "$ref": "GoogleCloudIdentitytoolkitAdminV2RecaptchaTollFraudManagedRule" }, @@ -2671,6 +2671,14 @@ "useAccountDefender": { "description": "Whether to use the account defender for reCAPTCHA assessment. Defaults to `false`.", "type": "boolean" +}, +"useSmsBotScore": { +"description": "Whether to use the rCE bot score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE.", +"type": "boolean" +}, +"useSmsTollFraudProtection": { +"description": "Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider. Can only be true when the phone_enforcement_state is AUDIT or ENFORCE.", +"type": "boolean" } }, "type": "object" @@ -2727,7 +2735,7 @@ "type": "object" }, "GoogleCloudIdentitytoolkitAdminV2RecaptchaTollFraudManagedRule": { -"description": "The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The start_score is maximum_allowed_score. End score is 1.0.", +"description": "The config for a reCAPTCHA toll fraud assessment managed rule. Models a single interval [start_score, end_score]. The end_score is implicit. It is either the closest smaller end_score (if one is available) or 0. Intervals in aggregate span [0, 1] without overlapping.", "id": "GoogleCloudIdentitytoolkitAdminV2RecaptchaTollFraudManagedRule", "properties": { "action": { @@ -2743,7 +2751,7 @@ "type": "string" }, "startScore": { -"description": "The start score (inclusive) for an action. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms.", +"description": "The start score (inclusive) for an action. Must be a value between 0.0 and 1.0, at 11 discrete values; e.g. 0, 0.1, 0.2, 0.3, ... 0.9, 1.0. A score of 0.0 indicates the safest request (likely legitimate), whereas 1.0 indicates the riskiest request (likely toll fraud). 
See https://cloud.google.com/recaptcha-enterprise/docs/sms-fraud-detection#create-assessment-sms.", "format": "float", "type": "number" } @@ -3369,6 +3377,14 @@ "recaptchaKey": { "description": "The reCAPTCHA Enterprise key resource name, e.g. \"projects/{project}/keys/{key}\". This will only be returned when the reCAPTCHA enforcement state is AUDIT or ENFORCE on at least one of the reCAPTCHA providers.", "type": "string" +}, +"useSmsBotScore": { +"description": "Whether to use the rCE bot score for reCAPTCHA phone provider.", +"type": "boolean" +}, +"useSmsTollFraudProtection": { +"description": "Whether to use the rCE sms toll fraud protection risk score for reCAPTCHA phone provider.", +"type": "boolean" } }, "type": "object" @@ -3546,7 +3562,7 @@ "type": "string" }, "recaptchaVersion": { -"description": "The reCAPTCHA version of the reCAPTCHA token in the captcha_response.", +"description": "The reCAPTCHA version of the reCAPTCHA token in the captcha_response. Required when reCAPTCHA Enterprise is enabled.", "enum": [ "RECAPTCHA_VERSION_UNSPECIFIED", "RECAPTCHA_ENTERPRISE" From ecbf5838271506387683be17e0722a901121cff9 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 11/18] feat(places): update the api #### places:v1 The following keys were added: - schemas.GoogleMapsPlacesV1Photo.properties.flagContentUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1Photo.properties.googleMapsUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1Place.properties.googleMapsLinks.$ref (Total Keys: 1) - schemas.GoogleMapsPlacesV1PlaceAreaSummary.properties.flagContentUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1PlaceGenerativeSummary.properties.overviewFlagContentUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1PlaceGoogleMapsLinks (Total Keys: 7) - schemas.GoogleMapsPlacesV1Review.properties.flagContentUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1Review.properties.googleMapsUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1RoutingSummary.properties.directionsUri.type (Total Keys: 1) - schemas.GoogleMapsPlacesV1SearchTextResponse.properties.searchUri.type (Total Keys: 1) --- docs/dyn/places_v1.places.html | 63 +++++++++++++++++ .../discovery_cache/documents/places.v1.json | 69 ++++++++++++++++++- 2 files changed, 131 insertions(+), 1 deletion(-) diff --git a/docs/dyn/places_v1.places.html b/docs/dyn/places_v1.places.html index d25e23bd50..b810ff690d 100644 --- a/docs/dyn/places_v1.places.html +++ b/docs/dyn/places_v1.places.html @@ -335,6 +335,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -353,6 +355,7 @@

Method Details

"topic": "A String", # The topic of the content, for example "overview" or "restaurant". }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the summary. }, "attributions": [ # A set of data provider that must be shown with this result. { # Information about data providers of this place. @@ -490,10 +493,12 @@

Method Details

"languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "descriptionFlagContentUri": "A String", # A link where users can flag a problem with the description summary. "overview": { # Localized variant of a text in a particular language. # The overview of the place. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "overviewFlagContentUri": "A String", # A link where users can flag a problem with the overview summary. "references": { # Experimental: See https://developers.google.com/maps/documentation/places/web-service/experimental/places-generative for more details. Reference that the generative content is related to. # References that are used to generate the summary description. "places": [ # The list of resource names of the referenced places. This name can be used in other APIs that accept Place resource names. "A String", @@ -505,6 +510,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -524,6 +531,13 @@

Method Details

"goodForChildren": True or False, # Place is good for children. "goodForGroups": True or False, # Place accommodates groups. "goodForWatchingSports": True or False, # Place is suitable for watching sports. + "googleMapsLinks": { # Links to trigger different Google Maps actions. # Links to trigger different Google Maps actions. + "directionsUri": "A String", # A link to show the directions to the place. The link only populates the destination location and uses the default travel mode `DRIVE`. + "photosUri": "A String", # A link to show photos of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + "placeUri": "A String", # A link to show this place. + "reviewsUri": "A String", # A link to show reviews of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + "writeAReviewUri": "A String", # A link to write a review for this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + }, "googleMapsUri": "A String", # A URL providing more information about this place. "iconBackgroundColor": "A String", # Background color for icon_mask in hex format, e.g. #909CE1. "iconMaskBaseUri": "A String", # A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png"). @@ -562,6 +576,8 @@

Method Details

"uri": "A String", # URI of the author of the Photo or Review. }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the photo. + "googleMapsUri": "A String", # A link to show the photo on Google Maps. "heightPx": 42, # The maximum available height, in pixels. "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (also called the API "resource" name: `places/{place_id}/photos/{photo}`). "widthPx": 42, # The maximum available width, in pixels. @@ -673,6 +689,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -850,6 +868,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -868,6 +888,7 @@

Method Details

"topic": "A String", # The topic of the content, for example "overview" or "restaurant". }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the summary. }, "attributions": [ # A set of data provider that must be shown with this result. { # Information about data providers of this place. @@ -1005,10 +1026,12 @@

Method Details

"languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "descriptionFlagContentUri": "A String", # A link where users can flag a problem with the description summary. "overview": { # Localized variant of a text in a particular language. # The overview of the place. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "overviewFlagContentUri": "A String", # A link where users can flag a problem with the overview summary. "references": { # Experimental: See https://developers.google.com/maps/documentation/places/web-service/experimental/places-generative for more details. Reference that the generative content is related to. # References that are used to generate the summary description. "places": [ # The list of resource names of the referenced places. This name can be used in other APIs that accept Place resource names. "A String", @@ -1020,6 +1043,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1039,6 +1064,13 @@

Method Details

"goodForChildren": True or False, # Place is good for children. "goodForGroups": True or False, # Place accommodates groups. "goodForWatchingSports": True or False, # Place is suitable for watching sports. + "googleMapsLinks": { # Links to trigger different Google Maps actions. # Links to trigger different Google Maps actions. + "directionsUri": "A String", # A link to show the directions to the place. The link only populates the destination location and uses the default travel mode `DRIVE`. + "photosUri": "A String", # A link to show photos of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + "placeUri": "A String", # A link to show this place. + "reviewsUri": "A String", # A link to show reviews of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + "writeAReviewUri": "A String", # A link to write a review for this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + }, "googleMapsUri": "A String", # A URL providing more information about this place. "iconBackgroundColor": "A String", # Background color for icon_mask in hex format, e.g. #909CE1. "iconMaskBaseUri": "A String", # A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png"). @@ -1077,6 +1109,8 @@

Method Details

"uri": "A String", # URI of the author of the Photo or Review. }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the photo. + "googleMapsUri": "A String", # A link to show the photo on Google Maps. "heightPx": 42, # The maximum available height, in pixels. "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (also called the API "resource" name: `places/{place_id}/photos/{photo}`). "widthPx": 42, # The maximum available width, in pixels. @@ -1188,6 +1222,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1240,6 +1276,7 @@

Method Details

], "routingSummaries": [ # A list of routing summaries where each entry associates to the corresponding place in the same index in the `places` field. If the routing summary is not available for one of the places, it will contain an empty entry. This list should have as many entries as the list of places if requested. { # The duration and distance from the routing origin to a place in the response, and a second leg from that place to the destination, if requested. **Note:** Adding `routingSummaries` in the field mask without also including either the `routingParameters.origin` parameter or the `searchAlongRouteParameters.polyline.encodedPolyline` parameter in the request causes an error. + "directionsUri": "A String", # A link to show directions on Google Maps using the waypoints from the given routing summary. The route generated by this link is not guaranteed to be the same as the route used to generate the routing summary. The link uses information provided in the request, from fields including `routingParameters` and `searchAlongRouteParameters` when applicable, to generate the directions link. "legs": [ # The legs of the trip. When you calculate travel duration and distance from a set origin, `legs` contains a single leg containing the duration and distance from the origin to the destination. When you do a search along route, `legs` contains two legs: one from the origin to place, and one from the place to the destination. { # A leg is a single portion of a journey from one location to another. "distanceMeters": 42, # The distance of this leg of the trip. @@ -1366,6 +1403,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1391,6 +1430,8 @@

Method Details

"uri": "A String", # URI of the author of the Photo or Review. }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the photo. + "googleMapsUri": "A String", # A link to show the photo on Google Maps. "heightPx": 42, # The maximum available height, in pixels. "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (also called the API "resource" name: `places/{place_id}/photos/{photo}`). "widthPx": 42, # The maximum available width, in pixels. @@ -1403,6 +1444,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1487,6 +1530,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1505,6 +1550,7 @@

Method Details

"topic": "A String", # The topic of the content, for example "overview" or "restaurant". }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the summary. }, "attributions": [ # A set of data provider that must be shown with this result. { # Information about data providers of this place. @@ -1642,10 +1688,12 @@

Method Details

"languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "descriptionFlagContentUri": "A String", # A link where users can flag a problem with the description summary. "overview": { # Localized variant of a text in a particular language. # The overview of the place. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "overviewFlagContentUri": "A String", # A link where users can flag a problem with the overview summary. "references": { # Experimental: See https://developers.google.com/maps/documentation/places/web-service/experimental/places-generative for more details. Reference that the generative content is related to. # References that are used to generate the summary description. "places": [ # The list of resource names of the referenced places. This name can be used in other APIs that accept Place resource names. "A String", @@ -1657,6 +1705,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1676,6 +1726,13 @@

Method Details

"goodForChildren": True or False, # Place is good for children. "goodForGroups": True or False, # Place accommodates groups. "goodForWatchingSports": True or False, # Place is suitable for watching sports. + "googleMapsLinks": { # Links to trigger different Google Maps actions. # Links to trigger different Google Maps actions. + "directionsUri": "A String", # A link to show the directions to the place. The link only populates the destination location and uses the default travel mode `DRIVE`. + "photosUri": "A String", # A link to show photos of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + "placeUri": "A String", # A link to show this place. + "reviewsUri": "A String", # A link to show reviews of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + "writeAReviewUri": "A String", # A link to write a review for this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps. + }, "googleMapsUri": "A String", # A URL providing more information about this place. "iconBackgroundColor": "A String", # Background color for icon_mask in hex format, e.g. #909CE1. "iconMaskBaseUri": "A String", # A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png"). @@ -1714,6 +1771,8 @@

Method Details

"uri": "A String", # URI of the author of the Photo or Review. }, ], + "flagContentUri": "A String", # A link where users can flag a problem with the photo. + "googleMapsUri": "A String", # A link to show the photo on Google Maps. "heightPx": 42, # The maximum available height, in pixels. "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (also called the API "resource" name: `places/{place_id}/photos/{photo}`). "widthPx": 42, # The maximum available width, in pixels. @@ -1825,6 +1884,8 @@

Method Details

"photoUri": "A String", # Profile photo URI of the author of the Photo or Review. "uri": "A String", # URI of the author of the Photo or Review. }, + "flagContentUri": "A String", # A link where users can flag a problem with the review. + "googleMapsUri": "A String", # A link to show the review on Google Maps. "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: `places/{place_id}/reviews/{review}`). "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. @@ -1877,6 +1938,7 @@

Method Details

], "routingSummaries": [ # A list of routing summaries where each entry associates to the corresponding place in the same index in the `places` field. If the routing summary is not available for one of the places, it will contain an empty entry. This list will have as many entries as the list of places if requested. { # The duration and distance from the routing origin to a place in the response, and a second leg from that place to the destination, if requested. **Note:** Adding `routingSummaries` in the field mask without also including either the `routingParameters.origin` parameter or the `searchAlongRouteParameters.polyline.encodedPolyline` parameter in the request causes an error. + "directionsUri": "A String", # A link to show directions on Google Maps using the waypoints from the given routing summary. The route generated by this link is not guaranteed to be the same as the route used to generate the routing summary. The link uses information provided in the request, from fields including `routingParameters` and `searchAlongRouteParameters` when applicable, to generate the directions link. "legs": [ # The legs of the trip. When you calculate travel duration and distance from a set origin, `legs` contains a single leg containing the duration and distance from the origin to the destination. When you do a search along route, `legs` contains two legs: one from the origin to place, and one from the place to the destination. { # A leg is a single portion of a journey from one location to another. "distanceMeters": 42, # The distance of this leg of the trip. @@ -1885,6 +1947,7 @@

Method Details

], }, ], + "searchUri": "A String", # A link allows the user to search with the same text query as specified in the request on Google Maps. }
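The Places hunks above are response-only additions: per-review and per-photo `flagContentUri`/`googleMapsUri`, a `googleMapsLinks` block on `Place`, `directionsUri` on routing summaries, and a top-level `searchUri` on Text Search responses. The only client-side change is including the new paths in the field mask. A sketch under assumptions: the API key and text query are placeholders, and the field mask is set via the `X-Goog-FieldMask` request header because the dynamic client does not populate it for you.

    # Hypothetical sketch: request the new Google Maps link fields from Text Search.
    from googleapiclient.discovery import build

    service = build("places", "v1", developerKey="YOUR_API_KEY")  # placeholder key

    request = service.places().searchText(body={"textQuery": "coffee near Union Square"})
    # Places API (New) requires an explicit field mask; the new fields are opted
    # into the same way as existing ones.
    request.headers["X-Goog-FieldMask"] = (
        "searchUri,places.displayName,places.googleMapsLinks,"
        "places.photos.flagContentUri,places.reviews.googleMapsUri"
    )
    response = request.execute()

    print(response.get("searchUri"))  # re-runs the same text query on Google Maps
    for place in response.get("places", []):
        links = place.get("googleMapsLinks", {})
        print(place.get("displayName", {}).get("text"), links.get("writeAReviewUri"))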
diff --git a/googleapiclient/discovery_cache/documents/places.v1.json b/googleapiclient/discovery_cache/documents/places.v1.json index c2792d685d..e755ad6204 100644 --- a/googleapiclient/discovery_cache/documents/places.v1.json +++ b/googleapiclient/discovery_cache/documents/places.v1.json @@ -280,7 +280,7 @@ } } }, -"revision": "20241022", +"revision": "20241028", "rootUrl": "https://places.googleapis.com/", "schemas": { "GoogleGeoTypeViewport": { @@ -957,6 +957,14 @@ }, "type": "array" }, +"flagContentUri": { +"description": "A link where users can flag a problem with the photo.", +"type": "string" +}, +"googleMapsUri": { +"description": "A link to show the photo on Google Maps.", +"type": "string" +}, "heightPx": { "description": "The maximum available height, in pixels.", "format": "int32", @@ -1102,6 +1110,10 @@ "description": "Place is suitable for watching sports.", "type": "boolean" }, +"googleMapsLinks": { +"$ref": "GoogleMapsPlacesV1PlaceGoogleMapsLinks", +"description": "Links to trigger different Google Maps actions." +}, "googleMapsUri": { "description": "A URL providing more information about this place.", "type": "string" @@ -1366,6 +1378,10 @@ "$ref": "GoogleMapsPlacesV1ContentBlock" }, "type": "array" +}, +"flagContentUri": { +"description": "A link where users can flag a problem with the summary.", +"type": "string" } }, "type": "object" @@ -1393,10 +1409,18 @@ "$ref": "GoogleTypeLocalizedText", "description": "The detailed description of the place." }, +"descriptionFlagContentUri": { +"description": "A link where users can flag a problem with the description summary.", +"type": "string" +}, "overview": { "$ref": "GoogleTypeLocalizedText", "description": "The overview of the place." }, +"overviewFlagContentUri": { +"description": "A link where users can flag a problem with the overview summary.", +"type": "string" +}, "references": { "$ref": "GoogleMapsPlacesV1References", "description": "References that are used to generate the summary description." @@ -1404,6 +1428,33 @@ }, "type": "object" }, +"GoogleMapsPlacesV1PlaceGoogleMapsLinks": { +"description": "Links to trigger different Google Maps actions.", +"id": "GoogleMapsPlacesV1PlaceGoogleMapsLinks", +"properties": { +"directionsUri": { +"description": "A link to show the directions to the place. The link only populates the destination location and uses the default travel mode `DRIVE`.", +"type": "string" +}, +"photosUri": { +"description": "A link to show photos of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps.", +"type": "string" +}, +"placeUri": { +"description": "A link to show this place.", +"type": "string" +}, +"reviewsUri": { +"description": "A link to show reviews of this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps.", +"type": "string" +}, +"writeAReviewUri": { +"description": "A link to write a review for this place. This link is currently not supported on Google Maps Mobile and only works on the web version of Google Maps.", +"type": "string" +} +}, +"type": "object" +}, "GoogleMapsPlacesV1PlaceOpeningHours": { "description": "Information about business hour of the place.", "id": "GoogleMapsPlacesV1PlaceOpeningHours", @@ -1656,6 +1707,14 @@ "$ref": "GoogleMapsPlacesV1AuthorAttribution", "description": "This review's author." 
}, +"flagContentUri": { +"description": "A link where users can flag a problem with the review.", +"type": "string" +}, +"googleMapsUri": { +"description": "A link to show the review on Google Maps.", +"type": "string" +}, "name": { "description": "A reference representing this place review which may be used to look up this place review again (also called the API \"resource\" name: `places/{place_id}/reviews/{review}`).", "type": "string" @@ -1761,6 +1820,10 @@ "description": "The duration and distance from the routing origin to a place in the response, and a second leg from that place to the destination, if requested. **Note:** Adding `routingSummaries` in the field mask without also including either the `routingParameters.origin` parameter or the `searchAlongRouteParameters.polyline.encodedPolyline` parameter in the request causes an error.", "id": "GoogleMapsPlacesV1RoutingSummary", "properties": { +"directionsUri": { +"description": "A link to show directions on Google Maps using the waypoints from the given routing summary. The route generated by this link is not guaranteed to be the same as the route used to generate the routing summary. The link uses information provided in the request, from fields including `routingParameters` and `searchAlongRouteParameters` when applicable, to generate the directions link.", +"type": "string" +}, "legs": { "description": "The legs of the trip. When you calculate travel duration and distance from a set origin, `legs` contains a single leg containing the duration and distance from the origin to the destination. When you do a search along route, `legs` contains two legs: one from the origin to place, and one from the place to the destination.", "items": { @@ -2106,6 +2169,10 @@ "$ref": "GoogleMapsPlacesV1RoutingSummary" }, "type": "array" +}, +"searchUri": { +"description": "A link allows the user to search with the same text query as specified in the request on Google Maps.", +"type": "string" } }, "type": "object" From eb919ff3667b351f828bd5a86fd2cacd1b9b2e25 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 12/18] feat(retail): update the api #### retail:v2 The following keys were added: - schemas.GoogleCloudRetailV2PinControlMetadata (Total Keys: 10) - schemas.GoogleCloudRetailV2SearchResponse.properties.pinControlMetadata.$ref (Total Keys: 1) #### retail:v2alpha The following keys were added: - schemas.GoogleCloudRetailV2alphaPinControlMetadata (Total Keys: 10) - schemas.GoogleCloudRetailV2alphaSearchResponse.properties.pinControlMetadata.$ref (Total Keys: 1) #### retail:v2beta The following keys were added: - schemas.GoogleCloudRetailV2betaPinControlMetadata (Total Keys: 10) - schemas.GoogleCloudRetailV2betaSearchResponse.properties.pinControlMetadata.$ref (Total Keys: 1) --- ...rojects.locations.catalogs.placements.html | 16 ++++++++ ...cts.locations.catalogs.servingConfigs.html | 16 ++++++++ ...rojects.locations.catalogs.placements.html | 16 ++++++++ ...cts.locations.catalogs.servingConfigs.html | 16 ++++++++ ...rojects.locations.catalogs.placements.html | 16 ++++++++ ...cts.locations.catalogs.servingConfigs.html | 16 ++++++++ .../discovery_cache/documents/retail.v2.json | 41 ++++++++++++++++++- .../documents/retail.v2alpha.json | 41 ++++++++++++++++++- .../documents/retail.v2beta.json | 41 ++++++++++++++++++- 9 files changed, 216 insertions(+), 3 deletions(-) diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html 
b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html index bc8013c056..3a67b1aa94 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html @@ -570,6 +570,22 @@

Method Details

}, ], "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + "pinControlMetadata": { # Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins. # Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin. + "allMatchedPins": { # Map of all matched pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + "droppedPins": { # Map of pins that were dropped due to overlap with other matching pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + }, "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results. "expandedQuery": True or False, # Bool describing whether query expansion has occurred. "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true. diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html index 57e623dadb..6622e4d030 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html @@ -1121,6 +1121,22 @@

Method Details

}, ], "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + "pinControlMetadata": { # Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins. # Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin. + "allMatchedPins": { # Map of all matched pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + "droppedPins": { # Map of pins that were dropped due to overlap with other matching pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + }, "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results. "expandedQuery": True or False, # Bool describing whether query expansion has occurred. "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true. diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html index c6f92d727c..8cb12857dd 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html @@ -571,6 +571,22 @@

Method Details

}, ], "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + "pinControlMetadata": { # Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins. # Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin. + "allMatchedPins": { # Map of all matched pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + "droppedPins": { # Map of pins that were dropped due to overlap with other matching pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + }, "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results. "expandedQuery": True or False, # Bool describing whether query expansion has occurred. "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true. diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html index 2dcc9f38f3..8bbba10662 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html @@ -1122,6 +1122,22 @@

Method Details

}, ], "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + "pinControlMetadata": { # Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins. # Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin. + "allMatchedPins": { # Map of all matched pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + "droppedPins": { # Map of pins that were dropped due to overlap with other matching pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + }, "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results. "expandedQuery": True or False, # Bool describing whether query expansion has occurred. "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true. diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html index 894e9af65a..9fa608bc9d 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html @@ -570,6 +570,22 @@

Method Details

}, ], "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + "pinControlMetadata": { # Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins. # Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin. + "allMatchedPins": { # Map of all matched pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + "droppedPins": { # Map of pins that were dropped due to overlap with other matching pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + }, "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results. "expandedQuery": True or False, # Bool describing whether query expansion has occurred. "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true. diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html index 94d4b5d209..dd9499aade 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html @@ -1121,6 +1121,22 @@

Method Details

}, ], "nextPageToken": "A String", # A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + "pinControlMetadata": { # Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins. # Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin. + "allMatchedPins": { # Map of all matched pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + "droppedPins": { # Map of pins that were dropped due to overlap with other matching pins, keyed by pin position. + "a_key": { # List of product ids which have associated pins. + "productId": [ # List of product ids which have associated pins. + "A String", + ], + }, + }, + }, "queryExpansionInfo": { # Information describing query expansion including whether expansion has occurred. # Query expansion information for the returned results. "expandedQuery": True or False, # Bool describing whether query expansion has occurred. "pinnedResultCount": "A String", # Number of pinned results. This field will only be set when expansion happens and SearchRequest.QueryExpansionSpec.pin_unexpanded_results is set to true. diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json index aa1043a6ea..a13e9a3acc 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2.json +++ b/googleapiclient/discovery_cache/documents/retail.v2.json @@ -2246,7 +2246,7 @@ } } }, -"revision": "20241017", +"revision": "20241024", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -4041,6 +4041,41 @@ "properties": {}, "type": "object" }, +"GoogleCloudRetailV2PinControlMetadata": { +"description": "Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins.", +"id": "GoogleCloudRetailV2PinControlMetadata", +"properties": { +"allMatchedPins": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2PinControlMetadataProductPins" +}, +"description": "Map of all matched pins, keyed by pin position.", +"type": "object" +}, +"droppedPins": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2PinControlMetadataProductPins" +}, +"description": "Map of pins that were dropped due to overlap with other matching pins, keyed by pin position.", +"type": "object" +} +}, +"type": "object" +}, +"GoogleCloudRetailV2PinControlMetadataProductPins": { +"description": "List of product ids which have associated pins.", +"id": "GoogleCloudRetailV2PinControlMetadataProductPins", +"properties": { +"productId": { +"description": "List of product ids which have associated pins.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudRetailV2PredictRequest": { "description": "Request message for Predict method.", "id": "GoogleCloudRetailV2PredictRequest", @@ -5504,6 +5539,10 @@ "description": "A token that can be sent as SearchRequest.page_token to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", "type": "string" }, +"pinControlMetadata": { +"$ref": "GoogleCloudRetailV2PinControlMetadata", +"description": "Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin." +}, "queryExpansionInfo": { "$ref": "GoogleCloudRetailV2SearchResponseQueryExpansionInfo", "description": "Query expansion information for the returned results." diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json index bb04cf309f..38dcfa4115 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json @@ -2767,7 +2767,7 @@ } } }, -"revision": "20241017", +"revision": "20241024", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -5864,6 +5864,41 @@ "properties": {}, "type": "object" }, +"GoogleCloudRetailV2alphaPinControlMetadata": { +"description": "Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins.", +"id": "GoogleCloudRetailV2alphaPinControlMetadata", +"properties": { +"allMatchedPins": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2alphaPinControlMetadataProductPins" +}, +"description": "Map of all matched pins, keyed by pin position.", +"type": "object" +}, +"droppedPins": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2alphaPinControlMetadataProductPins" +}, +"description": "Map of pins that were dropped due to overlap with other matching pins, keyed by pin position.", +"type": "object" +} +}, +"type": "object" +}, +"GoogleCloudRetailV2alphaPinControlMetadataProductPins": { +"description": "List of product ids which have associated pins.", +"id": "GoogleCloudRetailV2alphaPinControlMetadataProductPins", +"properties": { +"productId": { +"description": "List of product ids which have associated pins.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudRetailV2alphaPredictRequest": { "description": "Request message for Predict method.", "id": "GoogleCloudRetailV2alphaPredictRequest", @@ -7375,6 +7410,10 @@ "description": "A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.", "type": "string" }, +"pinControlMetadata": { +"$ref": "GoogleCloudRetailV2alphaPinControlMetadata", +"description": "Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin." +}, "queryExpansionInfo": { "$ref": "GoogleCloudRetailV2alphaSearchResponseQueryExpansionInfo", "description": "Query expansion information for the returned results." 
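For reference, a minimal sketch of how the new pinControlMetadata field introduced by this patch could be read from a Retail v2 search response using the generated Python client. The project, catalog path, query, and visitor id below are placeholders, not values taken from this patch.

    from googleapiclient.discovery import build

    retail = build("retail", "v2")
    placement = (
        "projects/PROJECT_ID/locations/global/catalogs/default_catalog"
        "/placements/default_search"
    )
    response = retail.projects().locations().catalogs().placements().search(
        placement=placement,
        body={"query": "shoes", "visitorId": "visitor-1"},
    ).execute()

    # pinControlMetadata distinguishes pins that were applied from pins that
    # matched but were dropped due to overlap; both maps are keyed by pin position.
    pin_metadata = response.get("pinControlMetadata", {})
    for position, pins in pin_metadata.get("allMatchedPins", {}).items():
        print("matched at", position, pins.get("productId", []))
    for position, pins in pin_metadata.get("droppedPins", {}).items():
        print("dropped at", position, pins.get("productId", []))

The same field is surfaced on the servingConfigs search responses shown below for the v2alpha and v2beta surfaces.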
diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json index 423297ace1..d2b2d4f35c 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2beta.json +++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json @@ -2391,7 +2391,7 @@ } } }, -"revision": "20241017", +"revision": "20241024", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -5842,6 +5842,41 @@ "properties": {}, "type": "object" }, +"GoogleCloudRetailV2betaPinControlMetadata": { +"description": "Metadata for pinning to be returned in the response. This is used for distinguishing between applied vs dropped pins.", +"id": "GoogleCloudRetailV2betaPinControlMetadata", +"properties": { +"allMatchedPins": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2betaPinControlMetadataProductPins" +}, +"description": "Map of all matched pins, keyed by pin position.", +"type": "object" +}, +"droppedPins": { +"additionalProperties": { +"$ref": "GoogleCloudRetailV2betaPinControlMetadataProductPins" +}, +"description": "Map of pins that were dropped due to overlap with other matching pins, keyed by pin position.", +"type": "object" +} +}, +"type": "object" +}, +"GoogleCloudRetailV2betaPinControlMetadataProductPins": { +"description": "List of product ids which have associated pins.", +"id": "GoogleCloudRetailV2betaPinControlMetadataProductPins", +"properties": { +"productId": { +"description": "List of product ids which have associated pins.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudRetailV2betaPredictRequest": { "description": "Request message for Predict method.", "id": "GoogleCloudRetailV2betaPredictRequest", @@ -7305,6 +7340,10 @@ "description": "A token that can be sent as SearchRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.", "type": "string" }, +"pinControlMetadata": { +"$ref": "GoogleCloudRetailV2betaPinControlMetadata", +"description": "Metadata for pin controls which were applicable to the request. This contains two map fields, one for all matched pins and one for pins which were matched but not applied. The two maps are keyed by pin position, and the values are the product ids which were matched to that pin." +}, "queryExpansionInfo": { "$ref": "GoogleCloudRetailV2betaSearchResponseQueryExpansionInfo", "description": "Query expansion information for the returned results." From 4975f964aa9667c8413efe05c11746be6ffec47e Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 13/18] feat(run): update the api #### run:v2 The following keys were added: - schemas.GoogleCloudRunV2GCSVolumeSource.properties.mountOptions (Total Keys: 2) --- .../run_v2.projects.locations.jobs.executions.html | 6 ++++++ ..._v2.projects.locations.jobs.executions.tasks.html | 6 ++++++ docs/dyn/run_v2.projects.locations.jobs.html | 12 ++++++++++++ docs/dyn/run_v2.projects.locations.services.html | 12 ++++++++++++ ...run_v2.projects.locations.services.revisions.html | 6 ++++++ .../discovery_cache/documents/run.v2.json | 9 ++++++++- 6 files changed, 50 insertions(+), 1 deletion(-) diff --git a/docs/dyn/run_v2.projects.locations.jobs.executions.html b/docs/dyn/run_v2.projects.locations.jobs.executions.html index 3d82c6b756..0966a79ae2 100644 --- a/docs/dyn/run_v2.projects.locations.jobs.executions.html +++ b/docs/dyn/run_v2.projects.locations.jobs.executions.html @@ -388,6 +388,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -599,6 +602,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. diff --git a/docs/dyn/run_v2.projects.locations.jobs.executions.tasks.html b/docs/dyn/run_v2.projects.locations.jobs.executions.tasks.html index 5582b4f661..8479b82c2c 100644 --- a/docs/dyn/run_v2.projects.locations.jobs.executions.tasks.html +++ b/docs/dyn/run_v2.projects.locations.jobs.executions.tasks.html @@ -267,6 +267,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -485,6 +488,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. diff --git a/docs/dyn/run_v2.projects.locations.jobs.html b/docs/dyn/run_v2.projects.locations.jobs.html index c4e337fe06..2965163ba8 100644 --- a/docs/dyn/run_v2.projects.locations.jobs.html +++ b/docs/dyn/run_v2.projects.locations.jobs.html @@ -296,6 +296,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -597,6 +600,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -884,6 +890,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -1130,6 +1139,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. diff --git a/docs/dyn/run_v2.projects.locations.services.html b/docs/dyn/run_v2.projects.locations.services.html index f09e10b0cc..a8fc754541 100644 --- a/docs/dyn/run_v2.projects.locations.services.html +++ b/docs/dyn/run_v2.projects.locations.services.html @@ -306,6 +306,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -640,6 +643,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -961,6 +967,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -1239,6 +1248,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. diff --git a/docs/dyn/run_v2.projects.locations.services.revisions.html b/docs/dyn/run_v2.projects.locations.services.revisions.html index 3484c54c42..ffd9e73abe 100644 --- a/docs/dyn/run_v2.projects.locations.services.revisions.html +++ b/docs/dyn/run_v2.projects.locations.services.revisions.html @@ -345,6 +345,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. @@ -562,6 +565,9 @@

Method Details

}, "gcs": { # Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. # Persistent storage backed by a Google Cloud Storage bucket. "bucket": "A String", # Cloud Storage Bucket name. + "mountOptions": [ # A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading "--". + "A String", + ], "readOnly": True or False, # If true, the volume will be mounted as read only for all mounts. }, "name": "A String", # Required. Volume's name. diff --git a/googleapiclient/discovery_cache/documents/run.v2.json b/googleapiclient/discovery_cache/documents/run.v2.json index 9f74a3d114..51b469c37c 100644 --- a/googleapiclient/discovery_cache/documents/run.v2.json +++ b/googleapiclient/discovery_cache/documents/run.v2.json @@ -1526,7 +1526,7 @@ } } }, -"revision": "20241011", +"revision": "20241025", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -2267,6 +2267,13 @@ "description": "Cloud Storage Bucket name.", "type": "string" }, +"mountOptions": { +"description": "A list of additional flags to pass to the gcsfuse CLI. Options should be specified without the leading \"--\".", +"items": { +"type": "string" +}, +"type": "array" +}, "readOnly": { "description": "If true, the volume will be mounted as read only for all mounts.", "type": "boolean" From e0dff0cd57e15a4630b83303b2914842cf37dc2a Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 14/18] feat(securitycenter): update the api #### securitycenter:v1 The following keys were added: - resources.folders.resources.sources.resources.findings.methods.list.parameters.compareDuration.deprecated (Total Keys: 1) - resources.folders.resources.sources.resources.findings.methods.list.parameters.readTime.deprecated (Total Keys: 1) - resources.organizations.resources.sources.resources.findings.methods.list.parameters.compareDuration.deprecated (Total Keys: 1) - resources.organizations.resources.sources.resources.findings.methods.list.parameters.readTime.deprecated (Total Keys: 1) - resources.projects.resources.sources.resources.findings.methods.list.parameters.compareDuration.deprecated (Total Keys: 1) - resources.projects.resources.sources.resources.findings.methods.list.parameters.readTime.deprecated (Total Keys: 1) - schemas.GroupFindingsRequest.properties.compareDuration.deprecated (Total Keys: 1) - schemas.GroupFindingsRequest.properties.readTime.deprecated (Total Keys: 1) - schemas.SetFindingStateRequest.properties.startTime.deprecated (Total Keys: 1) --- ...center_v1beta1.organizations.sources.findings.html | 2 +- .../discovery_cache/documents/securitycenter.v1.json | 11 ++++++++++- .../documents/securitycenter.v1beta1.json | 4 ++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/docs/dyn/securitycenter_v1beta1.organizations.sources.findings.html b/docs/dyn/securitycenter_v1beta1.organizations.sources.findings.html index d97d30176f..84cdda0c15 100644 --- a/docs/dyn/securitycenter_v1beta1.organizations.sources.findings.html +++ b/docs/dyn/securitycenter_v1beta1.organizations.sources.findings.html @@ -350,7 +350,7 @@

Method Details

The object takes the form of: { # Request message for updating a finding's state. - "startTime": "A String", # Required. The time at which the updated state takes effect. + "startTime": "A String", # Optional. The time at which the updated state takes effect. If not set uses the current time. "state": "A String", # Required. The desired State of the finding. } diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json index e1d53878fd..df5f732061 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json @@ -1509,6 +1509,7 @@ ], "parameters": { "compareDuration": { +"deprecated": true, "description": "When compare_duration is set, the ListFindingsResult's \"state_change\" attribute is updated to indicate whether the finding had its state changed, the finding's state remained unchanged, or if the finding was added in any state during the compare_duration period of time that precedes the read_time. This is the time between (read_time - compare_duration) and read_time. The state_change value is derived based on the presence and state of the finding at the two points in time. Intermediate state changes between the two times don't affect the result. For example, the results aren't affected if the finding is made inactive and then active again. Possible \"state_change\" values when compare_duration is specified: * \"CHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration, but changed its state at read_time. * \"UNCHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration and did not change state at read_time. * \"ADDED\": indicates that the finding did not match the given filter or was not present at the start of compare_duration, but was present at read_time. * \"REMOVED\": indicates that the finding was present and matched the filter at the start of compare_duration, but did not match the filter at read_time. If compare_duration is not specified, then the only possible state_change is \"UNUSED\", which will be the state_change set for all findings present at read_time.", "format": "google-duration", "location": "query", @@ -1549,6 +1550,7 @@ "type": "string" }, "readTime": { +"deprecated": true, "description": "Time used as a reference point when filtering findings. The filter is limited to findings existing at the supplied time and their values are those at that specific time. Absence of this field will default to the API's version of NOW.", "format": "google-datetime", "location": "query", @@ -3989,6 +3991,7 @@ ], "parameters": { "compareDuration": { +"deprecated": true, "description": "When compare_duration is set, the ListFindingsResult's \"state_change\" attribute is updated to indicate whether the finding had its state changed, the finding's state remained unchanged, or if the finding was added in any state during the compare_duration period of time that precedes the read_time. This is the time between (read_time - compare_duration) and read_time. The state_change value is derived based on the presence and state of the finding at the two points in time. Intermediate state changes between the two times don't affect the result. For example, the results aren't affected if the finding is made inactive and then active again. 
Possible \"state_change\" values when compare_duration is specified: * \"CHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration, but changed its state at read_time. * \"UNCHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration and did not change state at read_time. * \"ADDED\": indicates that the finding did not match the given filter or was not present at the start of compare_duration, but was present at read_time. * \"REMOVED\": indicates that the finding was present and matched the filter at the start of compare_duration, but did not match the filter at read_time. If compare_duration is not specified, then the only possible state_change is \"UNUSED\", which will be the state_change set for all findings present at read_time.", "format": "google-duration", "location": "query", @@ -4029,6 +4032,7 @@ "type": "string" }, "readTime": { +"deprecated": true, "description": "Time used as a reference point when filtering findings. The filter is limited to findings existing at the supplied time and their values are those at that specific time. Absence of this field will default to the API's version of NOW.", "format": "google-datetime", "location": "query", @@ -5656,6 +5660,7 @@ ], "parameters": { "compareDuration": { +"deprecated": true, "description": "When compare_duration is set, the ListFindingsResult's \"state_change\" attribute is updated to indicate whether the finding had its state changed, the finding's state remained unchanged, or if the finding was added in any state during the compare_duration period of time that precedes the read_time. This is the time between (read_time - compare_duration) and read_time. The state_change value is derived based on the presence and state of the finding at the two points in time. Intermediate state changes between the two times don't affect the result. For example, the results aren't affected if the finding is made inactive and then active again. Possible \"state_change\" values when compare_duration is specified: * \"CHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration, but changed its state at read_time. * \"UNCHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration and did not change state at read_time. * \"ADDED\": indicates that the finding did not match the given filter or was not present at the start of compare_duration, but was present at read_time. * \"REMOVED\": indicates that the finding was present and matched the filter at the start of compare_duration, but did not match the filter at read_time. If compare_duration is not specified, then the only possible state_change is \"UNUSED\", which will be the state_change set for all findings present at read_time.", "format": "google-duration", "location": "query", @@ -5696,6 +5701,7 @@ "type": "string" }, "readTime": { +"deprecated": true, "description": "Time used as a reference point when filtering findings. The filter is limited to findings existing at the supplied time and their values are those at that specific time. 
Absence of this field will default to the API's version of NOW.", "format": "google-datetime", "location": "query", @@ -5887,7 +5893,7 @@ } } }, -"revision": "20241018", +"revision": "20241026", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -12034,6 +12040,7 @@ "id": "GroupFindingsRequest", "properties": { "compareDuration": { +"deprecated": true, "description": "When compare_duration is set, the GroupResult's \"state_change\" attribute is updated to indicate whether the finding had its state changed, the finding's state remained unchanged, or if the finding was added during the compare_duration period of time that precedes the read_time. This is the time between (read_time - compare_duration) and read_time. The state_change value is derived based on the presence and state of the finding at the two points in time. Intermediate state changes between the two times don't affect the result. For example, the results aren't affected if the finding is made inactive and then active again. Possible \"state_change\" values when compare_duration is specified: * \"CHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration, but changed its state at read_time. * \"UNCHANGED\": indicates that the finding was present and matched the given filter at the start of compare_duration and did not change state at read_time. * \"ADDED\": indicates that the finding did not match the given filter or was not present at the start of compare_duration, but was present at read_time. * \"REMOVED\": indicates that the finding was present and matched the filter at the start of compare_duration, but did not match the filter at read_time. If compare_duration is not specified, then the only possible state_change is \"UNUSED\", which will be the state_change set for all findings present at read_time. If this field is set then `state_change` must be a specified field in `group_by`.", "format": "google-duration", "type": "string" @@ -12056,6 +12063,7 @@ "type": "string" }, "readTime": { +"deprecated": true, "description": "Time used as a reference point when filtering findings. The filter is limited to findings existing at the supplied time and their values are those at that specific time. Absence of this field will default to the API's version of NOW.", "format": "google-datetime", "type": "string" @@ -13936,6 +13944,7 @@ "id": "SetFindingStateRequest", "properties": { "startTime": { +"deprecated": true, "description": "Optional. The time at which the updated state takes effect. If unset, defaults to the request time.", "format": "google-datetime", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json index fd18d145c8..621b5df939 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json @@ -913,7 +913,7 @@ } } }, -"revision": "20241018", +"revision": "20241026", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -8233,7 +8233,7 @@ "id": "SetFindingStateRequest", "properties": { "startTime": { -"description": "Required. The time at which the updated state takes effect.", +"description": "Optional. The time at which the updated state takes effect. 
If not set uses the current time.", "format": "google-datetime", "type": "string" }, From 4f40ad990e69247d4256c0f3d1bcad87dfa232f8 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 15/18] feat(spanner): update the api #### spanner:v1 The following keys were added: - schemas.BeginTransactionRequest.properties.mutationKey.$ref (Total Keys: 1) - schemas.CommitRequest.properties.precommitToken.$ref (Total Keys: 1) - schemas.CommitResponse.properties.precommitToken.$ref (Total Keys: 1) - schemas.ExecuteBatchDmlResponse.properties.precommitToken.$ref (Total Keys: 1) - schemas.Instance.properties.defaultBackupScheduleType.type (Total Keys: 1) - schemas.PartialResultSet.properties.precommitToken.$ref (Total Keys: 1) - schemas.ReadWrite.properties.multiplexedSessionPreviousTransactionId (Total Keys: 2) - schemas.ResultSet.properties.precommitToken.$ref (Total Keys: 1) --- ...spanner_v1.projects.instances.backups.html | 26 ++-- ...projects.instances.databaseOperations.html | 2 +- ...s.instances.databases.backupSchedules.html | 24 ++-- ...cts.instances.databases.databaseRoles.html | 4 +- ...anner_v1.projects.instances.databases.html | 30 ++--- ...projects.instances.databases.sessions.html | 121 ++++++++++++++++++ docs/dyn/spanner_v1.projects.instances.html | 20 +-- .../discovery_cache/documents/spanner.v1.json | 87 +++++++++---- 8 files changed, 241 insertions(+), 73 deletions(-) diff --git a/docs/dyn/spanner_v1.projects.instances.backups.html b/docs/dyn/spanner_v1.projects.instances.backups.html index 0a4725806e..0f54f1d738 100644 --- a/docs/dyn/spanner_v1.projects.instances.backups.html +++ b/docs/dyn/spanner_v1.projects.instances.backups.html @@ -96,7 +96,7 @@

Instance Methods

Gets metadata on a pending or completed Backup.

getIamPolicy(resource, body=None, x__xgafv=None)

-

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.

+

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.

list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

Lists completed and pending backups. Backups returned are ordered by `create_time` in descending order, starting from the most recent `create_time`.

@@ -108,10 +108,10 @@

Instance Methods

Updates a pending or completed Backup.

setIamPolicy(resource, body=None, x__xgafv=None)

-

Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.

+

Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.

testIamPermissions(resource, body=None, x__xgafv=None)

-

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.

+

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.

Method Details

close() @@ -132,7 +132,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the copied backup. # Optional. The encryption configuration used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the source backup by default, namely encryption_type = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. Kms keys specified can be in any order. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. KMS keys specified can be in any order. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -198,7 +198,7 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, - "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. { # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -239,7 +239,7 @@

Method Details

GOOGLE_DEFAULT_ENCRYPTION - Use Google default encryption. CUSTOMER_MANAGED_ENCRYPTION - Use customer managed encryption. If specified, `kms_key_name` must contain a valid Cloud KMS key. encryptionConfig_kmsKeyName: string, Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - encryptionConfig_kmsKeyNames: string, Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. (repeated) + encryptionConfig_kmsKeyNames: string, Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. (repeated) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -321,7 +321,7 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, - "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. { # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -357,7 +357,7 @@

Method Details

getIamPolicy(resource, body=None, x__xgafv=None) -
Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.
+  
Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
@@ -437,7 +437,7 @@ 

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, - "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. { # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -517,7 +517,7 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, - "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. { # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -579,7 +579,7 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, - "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. { # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -615,7 +615,7 @@

Method Details

setIamPolicy(resource, body=None, x__xgafv=None) -
Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.
+  
Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources. (required)
@@ -673,7 +673,7 @@ 

Method Details

testIamPermissions(resource, body=None, x__xgafv=None) -
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.
+  
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
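A minimal sketch of the IAM methods that the updated descriptions above now document for backup schedules as well as backups; the project, instance, database, and schedule names are placeholders.

    from googleapiclient.discovery import build

    spanner = build("spanner", "v1")
    schedule_name = (
        "projects/PROJECT_ID/instances/INSTANCE_ID"
        "/databases/DATABASE_ID/backupSchedules/SCHEDULE_ID"
    )
    # Requires spanner.backupSchedules.getIamPolicy on the resource.
    policy = (
        spanner.projects()
        .instances()
        .databases()
        .backupSchedules()
        .getIamPolicy(resource=schedule_name, body={})
        .execute()
    )
    print(policy.get("bindings", []))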
diff --git a/docs/dyn/spanner_v1.projects.instances.databaseOperations.html b/docs/dyn/spanner_v1.projects.instances.databaseOperations.html
index cc2e7d08dd..803ea60e00 100644
--- a/docs/dyn/spanner_v1.projects.instances.databaseOperations.html
+++ b/docs/dyn/spanner_v1.projects.instances.databaseOperations.html
@@ -95,7 +95,7 @@ 

Method Details

Args: parent: string, Required. The instance of the database operations. Values are of the form `projects//instances/`. (required) - filter: string, An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for RestoreDatabaseMetadata is `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * `metadata.` - any field in metadata.value. `metadata.@type` must be specified first, if filtering on metadata fields. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \ `(metadata.source_type:BACKUP) AND` \ `(metadata.backup_info.backup:backup_howl) AND` \ `(metadata.name:restored_howl) AND` \ `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ `(error:*)` - Return operations where: * The operation's metadata type is RestoreDatabaseMetadata. * The database is restored from a backup. * The backup name contains "backup_howl". * The restored database's name contains "restored_howl". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error. + filter: string, An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for RestoreDatabaseMetadata is `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * `metadata.` - any field in metadata.value. `metadata.@type` must be specified first, if filtering on metadata fields. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. 
* `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \ `(metadata.source_type:BACKUP) AND` \ `(metadata.backup_info.backup:backup_howl) AND` \ `(metadata.name:restored_howl) AND` \ `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ `(error:*)` - Return operations where: * The operation's metadata type is RestoreDatabaseMetadata. * The database is restored from a backup. * The backup name contains "backup_howl". * The restored database's name contains "restored_howl". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.
  pageSize: integer, Number of operations to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.
  pageToken: string, If non-empty, `page_token` should contain a next_page_token from a previous ListDatabaseOperationsResponse to the same `parent` and with the same `filter`.
  x__xgafv: string, V1 error format.
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.backupSchedules.html b/docs/dyn/spanner_v1.projects.instances.databases.backupSchedules.html
index 588a5ac240..f8c459646c 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.backupSchedules.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.backupSchedules.html
@@ -88,7 +88,7 @@

Instance Methods

Gets the backup schedule for the input schedule name.

getIamPolicy(resource, body=None, x__xgafv=None)

-

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.

+

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

Lists all the backup schedules for the database.

@@ -100,10 +100,10 @@

Instance Methods

Updates a backup schedule.

setIamPolicy(resource, body=None, x__xgafv=None)

-

Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.

+

Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.

testIamPermissions(resource, body=None, x__xgafv=None)

-

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.

+

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.

Method Details

close()
@@ -123,7 +123,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the backup to create. # Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -156,7 +156,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the backup to create. # Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -213,7 +213,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the backup to create. # Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -236,7 +236,7 @@

Method Details

getIamPolicy(resource, body=None, x__xgafv=None) -
Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.
+  
Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
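
For orientation, here is a minimal sketch (not part of the generated docs above) of calling `getIamPolicy` on a backup schedule through the discovery-based Python client. The project, instance, database, and schedule names are placeholders, and the backup-schedule resource path format is an assumption extrapolated from the database resource format shown in the Args.

```
from googleapiclient import discovery

# Build the dynamic Spanner client (uses application-default credentials).
service = discovery.build("spanner", "v1")

# Hypothetical backup-schedule resource name; substitute your own resources.
resource = (
    "projects/my-project/instances/my-instance/"
    "databases/my-database/backupSchedules/my-schedule"
)

policy = (
    service.projects()
    .instances()
    .databases()
    .backupSchedules()
    .getIamPolicy(resource=resource, body={})
    .execute()
)
print(policy.get("bindings", []))
```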
@@ -299,7 +299,7 @@ 

Method Details

"encryptionConfig": { # Encryption configuration for the backup to create. # Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -350,7 +350,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the backup to create. # Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -383,7 +383,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the backup to create. # Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -406,7 +406,7 @@

Method Details

setIamPolicy(resource, body=None, x__xgafv=None) -
Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.
+  
Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources. (required)
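
A hedged sketch of the corresponding `setIamPolicy` call follows. Per the standard IAM request shape, the body wraps the new Policy in a `policy` field; the role, member, and resource names below are purely illustrative.

```
from googleapiclient import discovery

service = discovery.build("spanner", "v1")

# Hypothetical backup-schedule resource name.
resource = (
    "projects/my-project/instances/my-instance/"
    "databases/my-database/backupSchedules/my-schedule"
)

# SetIamPolicyRequest: the supplied policy replaces any existing policy.
body = {
    "policy": {
        "bindings": [
            {
                "role": "roles/spanner.backupAdmin",  # illustrative role
                "members": ["user:alice@example.com"],
            }
        ]
    }
}

policy = (
    service.projects()
    .instances()
    .databases()
    .backupSchedules()
    .setIamPolicy(resource=resource, body=body)
    .execute()
)
```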
@@ -464,7 +464,7 @@ 

Method Details

testIamPermissions(resource, body=None, x__xgafv=None) -
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.
+  
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
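
And a sketch of `testIamPermissions`, again with placeholder resource names. The permission strings come from the method description above; the returned object contains only the subset the caller actually holds.

```
from googleapiclient import discovery

service = discovery.build("spanner", "v1")

resource = (
    "projects/my-project/instances/my-instance/"
    "databases/my-database/backupSchedules/my-schedule"
)

# Per the description above, calling this on a missing schedule returns
# NOT_FOUND only if the caller has spanner.backupSchedules.list on the database.
body = {
    "permissions": [
        "spanner.backupSchedules.list",
        "spanner.backupSchedules.getIamPolicy",
    ]
}

granted = (
    service.projects()
    .instances()
    .databases()
    .backupSchedules()
    .testIamPermissions(resource=resource, body=body)
    .execute()
)
print(granted.get("permissions", []))
```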
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.databaseRoles.html b/docs/dyn/spanner_v1.projects.instances.databases.databaseRoles.html
index 5387c96350..11ed8f68a6 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.databaseRoles.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.databaseRoles.html
@@ -85,7 +85,7 @@ 

Instance Methods

Retrieves the next page of results.

testIamPermissions(resource, body=None, x__xgafv=None)

-

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.

+

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.

Method Details

close()
@@ -134,7 +134,7 @@

Method Details

testIamPermissions(resource, body=None, x__xgafv=None) -
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.
+  
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
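
The databaseRoles collection excerpted above also exposes the generated list / "next page of results" pairing. A hedged paging sketch, with placeholder names and an assumed `databaseRoles` key in the list response, is shown below.

```
from googleapiclient import discovery

service = discovery.build("spanner", "v1")
roles = service.projects().instances().databases().databaseRoles()

request = roles.list(
    parent="projects/my-project/instances/my-instance/databases/my-database"
)
while request is not None:
    response = request.execute()
    for role in response.get("databaseRoles", []):
        print(role["name"])
    # list_next returns None once there are no further pages.
    request = roles.list_next(previous_request=request, previous_response=response)
```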
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.html b/docs/dyn/spanner_v1.projects.instances.databases.html
index b340a57b3d..66fdfc4d06 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.html
@@ -114,7 +114,7 @@ 

Instance Methods

Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This method does not show pending schema updates; those may be queried using the Operations API.

getIamPolicy(resource, body=None, x__xgafv=None)

-

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.

+

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.

getScans(name, endTime=None, startTime=None, view=None, x__xgafv=None)

Request a specific scan with Database-specific data for Cloud Key Visualizer.

@@ -132,10 +132,10 @@

Instance Methods

Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with the same instance configuration as the instance containing the backup. The returned database long-running operation has a name of the format `projects//instances//databases//operations/`, and can be used to track the progress of the operation, and to cancel it. The metadata field type is RestoreDatabaseMetadata. The response type is Database, if successful. Cancelling the returned operation will stop the restore and delete the database. There can be only one database being restored into an instance at a time. Once the restore operation completes, a new restore operation can be initiated, without waiting for the optimize operation associated with the first restore to complete.

setIamPolicy(resource, body=None, x__xgafv=None)

-

Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.

+

Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.

testIamPermissions(resource, body=None, x__xgafv=None)

-

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.

+

Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.

updateDdl(database, body=None, x__xgafv=None)

Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned long-running operation will have a name of the format `/operations/` and can be used to track execution of the schema change(s). The metadata field type is UpdateDatabaseDdlMetadata. The operation has no response.

@@ -209,7 +209,7 @@

Method Details

"databaseDialect": "A String", # Optional. The dialect of the Cloud Spanner Database. "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Optional. The encryption configuration for the database. If this field is not specified, Cloud Spanner will encrypt/decrypt all data at rest using Google default encryption. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -288,11 +288,11 @@

Method Details

"enableDropProtection": True or False, # Optional. Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, - "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. + "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status` field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. 
{ # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -359,7 +359,7 @@

Method Details

getIamPolicy(resource, body=None, x__xgafv=None) -
Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.
+  
Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
@@ -631,11 +631,11 @@ 

Method Details

"enableDropProtection": True or False, # Optional. Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, - "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. + "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status` field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. 
{ # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -712,11 +712,11 @@

Method Details

"enableDropProtection": True or False, # Optional. Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "kmsKeyNames": [ # Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, - "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. + "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status` field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. 
{ # Encryption information for a Cloud Spanner database or backup. "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. "code": 42, # The status code, which should be an enum value of google.rpc.Code. @@ -803,7 +803,7 @@

Method Details

"encryptionConfig": { # Encryption configuration for the restored database. # Optional. An encryption configuration describing the encryption type and key resources in Cloud KMS used to encrypt/decrypt the database to restore to. If this field is not specified, the restored database will use the same encryption configuration as the backup by default, namely encryption_type = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. "encryptionType": "A String", # Required. The encryption type of the restored database. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored database. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. - "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to encrypt the database. Values have the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configurations, specify a single regional location KMS key. * For multi-regional database instance configurations of type `GOOGLE_MANAGED`, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For a database instance configuration of type `USER_MANAGED`, please specify only regional location KMS keys to cover each region in the instance configuration. Multi-regional location KMS keys are not supported for USER_MANAGED instance configurations. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for one or more keys used to encrypt the database. Values have the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations. "A String", ], }, @@ -840,7 +840,7 @@

Method Details

setIamPolicy(resource, body=None, x__xgafv=None) -
Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.
+  
Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources. (required)
@@ -898,7 +898,7 @@ 

Method Details

testIamPermissions(resource, body=None, x__xgafv=None) -
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.
+  
Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.
 
 Args:
   resource: string, REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources. (required)
@@ -936,7 +936,7 @@ 

Method Details

The object takes the form of:

    { # Enqueues the given DDL statements to be applied, in order but not necessarily all at once, to the database schema at some point (or points) in the future. The server checks that the statements are executable (syntactically valid, name tables that exist, etc.) before enqueueing them, but they may still fail upon later execution (e.g., if a statement from another batch of statements is applied first and it conflicts in some way, or if there is some data-related problem like a `NULL` value in a column to which `NOT NULL` would be added). If a statement fails, all subsequent statements in the batch are automatically cancelled. Each batch of statements is assigned a name which can be used with the Operations API to monitor progress. See the operation_id field for more details.
-  "operationId": "A String", # If empty, the new update request is assigned an automatically-generated operation ID. Otherwise, `operation_id` is used to construct the name of the resulting Operation. Specifying an explicit operation ID simplifies determining whether the statements were executed in the event that the UpdateDatabaseDdl call is replayed, or the return value is otherwise lost: the database and `operation_id` fields can be combined to form the name of the resulting longrunning.Operation: `/operations/`. `operation_id` should be unique within the database, and must be a valid identifier: `a-z*`. Note that automatically-generated operation IDs always begin with an underscore. If the named operation already exists, UpdateDatabaseDdl returns `ALREADY_EXISTS`.
+  "operationId": "A String", # If empty, the new update request is assigned an automatically-generated operation ID. Otherwise, `operation_id` is used to construct the name of the resulting Operation. Specifying an explicit operation ID simplifies determining whether the statements were executed in the event that the UpdateDatabaseDdl call is replayed, or the return value is otherwise lost: the database and `operation_id` fields can be combined to form the `name` of the resulting longrunning.Operation: `/operations/`. `operation_id` should be unique within the database, and must be a valid identifier: `a-z*`. Note that automatically-generated operation IDs always begin with an underscore. If the named operation already exists, UpdateDatabaseDdl returns `ALREADY_EXISTS`.
   "protoDescriptors": "A String", # Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements. Contains a protobuf-serialized [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto). To generate it, [install](https://grpc.io/docs/protoc-installation/) and run `protoc` with --include_imports and --descriptor_set_out. For example, to generate for moon/shot/app.proto, run ``` $protoc --proto_path=/app_path --proto_path=/lib_path \ --include_imports \ --descriptor_set_out=descriptors.data \ moon/shot/app.proto ``` For more details, see protobuffer [self description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
   "statements": [ # Required. DDL statements to be applied to the database.
     "A String",
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
index 9b19af6671..c0af17c493 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
@@ -308,6 +308,79 @@

Method Details

The object takes the form of: { # The request for BeginTransaction. + "mutationKey": { # A modification to one or more Cloud Spanner rows. Mutations can be applied to a Cloud Spanner database by sending them in a Commit call. # Optional. Required for read-write transactions on a multiplexed session that commit mutations but do not perform any reads or queries. Clients should randomly select one of the mutations from the mutation set and send it as a part of this request. + "delete": { # Arguments to delete operations. # Delete rows from a table. Succeeds whether or not the named rows were present. + "keySet": { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All the keys are expected to be in the same table or index. The keys need not be sorted in any particular way. If the same key is specified multiple times in the set (for example if two ranges, two keys, or a key and a range overlap), Cloud Spanner behaves as if the key were only specified once. # Required. The primary keys of the rows within table to delete. The primary keys must be specified in the order in which they appear in the `PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL statement used to create the table). Delete is idempotent. The transaction will succeed even if some or all rows do not exist. + "all": True or False, # For convenience `all` can be set to `true` to indicate that this `KeySet` matches all keys in the table or index. Note that any keys specified in `keys` or `ranges` are only yielded once. + "keys": [ # A list of specific keys. Entries in `keys` should have exactly as many elements as there are columns in the primary or index key with which this `KeySet` is used. Individual key values are encoded as described here. + [ + "", + ], + ], + "ranges": [ # A list of key ranges. See KeyRange for more information about key range specifications. + { # KeyRange represents a range of rows in a table or index. A range has a start key and an end key. These keys can be open or closed, indicating if the range includes rows with that key. Keys are represented by lists, where the ith value in the list corresponds to the ith component of the table or index primary key. Individual values are encoded as described here. For example, consider the following table definition: CREATE TABLE UserEvents ( UserName STRING(MAX), EventDate STRING(10) ) PRIMARY KEY(UserName, EventDate); The following keys name rows in this table: "Bob", "2014-09-23" Since the `UserEvents` table's `PRIMARY KEY` clause names two columns, each `UserEvents` key has two elements; the first is the `UserName`, and the second is the `EventDate`. Key ranges with multiple components are interpreted lexicographically by component using the table or index key's declared sort order. For example, the following range returns all events for user `"Bob"` that occurred in the year 2015: "start_closed": ["Bob", "2015-01-01"] "end_closed": ["Bob", "2015-12-31"] Start and end keys can omit trailing key components. This affects the inclusion and exclusion of rows that exactly match the provided key components: if the key is closed, then rows that exactly match the provided components are included; if the key is open, then rows that exactly match are not included. 
For example, the following range includes all events for `"Bob"` that occurred during and after the year 2000: "start_closed": ["Bob", "2000-01-01"] "end_closed": ["Bob"] The next example retrieves all events for `"Bob"`: "start_closed": ["Bob"] "end_closed": ["Bob"] To retrieve events before the year 2000: "start_closed": ["Bob"] "end_open": ["Bob", "2000-01-01"] The following range includes all rows in the table: "start_closed": [] "end_closed": [] This range returns all users whose `UserName` begins with any character from A to C: "start_closed": ["A"] "end_open": ["D"] This range returns all users whose `UserName` begins with B: "start_closed": ["B"] "end_open": ["C"] Key ranges honor column sort order. For example, suppose a table is defined as follows: CREATE TABLE DescendingSortedTable { Key INT64, ... ) PRIMARY KEY(Key DESC); The following range retrieves all rows with key values between 1 and 100 inclusive: "start_closed": ["100"] "end_closed": ["1"] Note that 100 is passed as the start, and 1 is passed as the end, because `Key` is a descending column in the schema. + "endClosed": [ # If the end is closed, then the range includes all rows whose first `len(end_closed)` key columns exactly match `end_closed`. + "", + ], + "endOpen": [ # If the end is open, then the range excludes rows whose first `len(end_open)` key columns exactly match `end_open`. + "", + ], + "startClosed": [ # If the start is closed, then the range includes all rows whose first `len(start_closed)` key columns exactly match `start_closed`. + "", + ], + "startOpen": [ # If the start is open, then the range excludes rows whose first `len(start_open)` key columns exactly match `start_open`. + "", + ], + }, + ], + }, + "table": "A String", # Required. The table whose rows will be deleted. + }, + "insert": { # Arguments to insert, update, insert_or_update, and replace operations. # Insert new rows in a table. If any of the rows already exist, the write or transaction fails with error `ALREADY_EXISTS`. + "columns": [ # The names of the columns in table to be written. The list of columns must contain enough columns to allow Cloud Spanner to derive values for all primary key columns in the row(s) to be modified. + "A String", + ], + "table": "A String", # Required. The table whose rows will be written. + "values": [ # The values to be written. `values` can contain more than one list of values. If it does, then multiple rows are written, one for each entry in `values`. Each list in `values` must have exactly as many entries as there are entries in columns above. Sending multiple lists is equivalent to sending multiple `Mutation`s, each containing one `values` entry and repeating table and columns. Individual values in each list are encoded as described here. + [ + "", + ], + ], + }, + "insertOrUpdate": { # Arguments to insert, update, insert_or_update, and replace operations. # Like insert, except that if the row already exists, then its column values are overwritten with the ones provided. Any column values not explicitly written are preserved. When using insert_or_update, just as when using insert, all `NOT NULL` columns in the table must be given a value. This holds true even when the row already exists and will therefore actually be updated. + "columns": [ # The names of the columns in table to be written. The list of columns must contain enough columns to allow Cloud Spanner to derive values for all primary key columns in the row(s) to be modified. + "A String", + ], + "table": "A String", # Required. 
The table whose rows will be written. + "values": [ # The values to be written. `values` can contain more than one list of values. If it does, then multiple rows are written, one for each entry in `values`. Each list in `values` must have exactly as many entries as there are entries in columns above. Sending multiple lists is equivalent to sending multiple `Mutation`s, each containing one `values` entry and repeating table and columns. Individual values in each list are encoded as described here. + [ + "", + ], + ], + }, + "replace": { # Arguments to insert, update, insert_or_update, and replace operations. # Like insert, except that if the row already exists, it is deleted, and the column values provided are inserted instead. Unlike insert_or_update, this means any values not explicitly written become `NULL`. In an interleaved table, if you create the child table with the `ON DELETE CASCADE` annotation, then replacing a parent row also deletes the child rows. Otherwise, you must delete the child rows before you replace the parent row. + "columns": [ # The names of the columns in table to be written. The list of columns must contain enough columns to allow Cloud Spanner to derive values for all primary key columns in the row(s) to be modified. + "A String", + ], + "table": "A String", # Required. The table whose rows will be written. + "values": [ # The values to be written. `values` can contain more than one list of values. If it does, then multiple rows are written, one for each entry in `values`. Each list in `values` must have exactly as many entries as there are entries in columns above. Sending multiple lists is equivalent to sending multiple `Mutation`s, each containing one `values` entry and repeating table and columns. Individual values in each list are encoded as described here. + [ + "", + ], + ], + }, + "update": { # Arguments to insert, update, insert_or_update, and replace operations. # Update existing rows in a table. If any of the rows does not already exist, the transaction fails with error `NOT_FOUND`. + "columns": [ # The names of the columns in table to be written. The list of columns must contain enough columns to allow Cloud Spanner to derive values for all primary key columns in the row(s) to be modified. + "A String", + ], + "table": "A String", # Required. The table whose rows will be written. + "values": [ # The values to be written. `values` can contain more than one list of values. If it does, then multiple rows are written, one for each entry in `values`. Each list in `values` must have exactly as many entries as there are entries in columns above. Sending multiple lists is equivalent to sending multiple `Mutation`s, each containing one `values` entry and repeating table and columns. Individual values in each list are encoded as described here. + [ + "", + ], + ], + }, + }, "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. 
Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. See TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. 
If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. 
All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. 
"excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. @@ -321,6 +394,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -440,6 +514,10 @@

Method Details

}, }, ], + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. If the read-write transaction was executed on a multiplexed session, the precommit token with the highest sequence number received in this transaction attempt, should be included here. Failing to do so will result in a FailedPrecondition error. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "requestOptions": { # Common request options for various APIs. # Common options for this request. "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both request_tag and transaction_tag can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (e.g. CommitRequest). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters will be removed from the string. @@ -459,6 +537,7 @@
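The `requestOptions` block shown here is set per call; a sketch that tags a low-priority query (session, SQL, and tag values are placeholders, and `requestTag` is simply ignored on calls where it does not apply, such as Commit):

```
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
session = ("projects/my-project/instances/my-instance/databases/my-db"
           "/sessions/my-session")  # placeholder

result = (
    spanner.projects().instances().databases().sessions()
    .executeSql(
        session=session,
        body={
            "sql": "SELECT 1",
            "requestOptions": {
                "priority": "PRIORITY_LOW",
                "requestTag": "nightly-report",  # <= 50 printable characters
            },
        },
    )
    .execute()
)
```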

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -478,6 +557,10 @@

Method Details

"mutationCount": "A String", # The total number of mutations for the transaction. Knowing the `mutation_count` value can help you maximize the number of mutations in a transaction and minimize the number of API round trips. You can also monitor this value to prevent transactions from exceeding the system [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data). If the number of mutations exceeds the limit, the server returns [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT). }, "commitTimestamp": "A String", # The Cloud Spanner timestamp at which the transaction committed. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # If specified, transaction has not committed yet. Clients must retry the commit with the new precommit token. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, }
@@ -595,6 +678,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -612,6 +696,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -627,6 +712,10 @@

Method Details

An object of the form: { # The response for ExecuteBatchDml. Contains a list of ResultSet messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the response body identifies the cause of the failure. To check for DML statements that failed, use the following approach: 1. Check the status in the response message. The google.rpc.Code enum value `OK` indicates that all statements were executed successfully. 2. If the status was not `OK`, check the number of result sets in the response. If the response contains `N` ResultSet messages, then statement `N+1` in the request failed. Example 1: * Request: 5 DML statements, all executed successfully. * Response: 5 ResultSet messages, with the status `OK`. Example 2: * Request: 5 DML statements. The third statement has a syntax error. * Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`) status. The number of ResultSet messages indicates that the third statement failed, and the fourth and fifth statements were not executed. + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "resultSets": [ # One ResultSet for each statement in the request that ran successfully, in the same order as the statements in the request. Each ResultSet does not contain any rows. The ResultSetStats in each ResultSet contain the number of rows modified by the statement. Only the first ResultSet in the response contains valid ResultSetMetadata. { # Results from Read or ExecuteSql. "metadata": { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information. @@ -655,6 +744,10 @@
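The failure-checking recipe above is a few lines with the dynamic client; a sketch with placeholder session and statements (`seqno` is included because DML statements require it):

```
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
session = ("projects/my-project/instances/my-instance/databases/my-db"
           "/sessions/my-session")  # placeholder

response = (
    spanner.projects().instances().databases().sessions()
    .executeBatchDml(
        session=session,
        body={
            "transaction": {"begin": {"readWrite": {}}},
            "statements": [
                {"sql": "UPDATE Albums SET MarketingBudget = 0 "
                        "WHERE MarketingBudget IS NULL"},
                {"sql": "DELETE FROM Albums WHERE MarketingBudget = 0"},
            ],
            "seqno": "1",
        },
    )
    .execute()
)

status = response.get("status", {})
result_sets = response.get("resultSets", [])
if status.get("code", 0) == 0:            # google.rpc.Code OK
    print("all", len(result_sets), "statements succeeded")
else:
    # With N ResultSet messages returned, statement N+1 is the one that failed.
    print("statement", len(result_sets) + 1, "failed:", status.get("message"))
```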

Method Details

], }, }, + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "rows": [ # Each element in `rows` is a row whose format is defined by metadata.row_type. The ith element in each row matches the ith field in metadata.row_type. Elements are encoded based on type as described here. [ "", @@ -796,6 +889,7 @@
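As a small illustration of how `rows` lines up with `metadata.row_type`, the sketch below zips field names with the values of each returned row (placeholder names; type-specific decoding is not shown):

```
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
session = ("projects/my-project/instances/my-instance/databases/my-db"
           "/sessions/my-session")  # placeholder

result = (
    spanner.projects().instances().databases().sessions()
    .executeSql(session=session,
                body={"sql": "SELECT SingerId, Name FROM Singers"})
    .execute()
)

fields = result["metadata"]["rowType"]["fields"]   # ith field describes ith column
for row in result.get("rows", []):
    # The ith element of each row matches the ith field in row_type.
    print({f["name"]: value for f, value in zip(fields, row)})
```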

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -813,6 +907,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -854,6 +949,10 @@

Method Details

], }, }, + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "rows": [ # Each element in `rows` is a row whose format is defined by metadata.row_type. The ith element in each row matches the ith field in metadata.row_type. Elements are encoded based on type as described here. [ "", @@ -984,6 +1083,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1001,6 +1101,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1043,6 +1144,10 @@

Method Details

], }, }, + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "resumeToken": "A String", # Streaming calls might be interrupted for a variety of reasons, such as TCP connection loss. If this occurs, the stream of results can be resumed by re-sending the original request and including `resume_token`. Note that executing any other transaction in the same session invalidates the token. "stats": { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the statement that produced this streaming result set. These can be requested by setting ExecuteSqlRequest.query_mode and are sent only once with the last response in the stream. This field will also be present in the last response for DML statements. "queryPlan": { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result. @@ -1217,6 +1322,7 @@
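The `resumeToken` described here supports resuming an interrupted stream by re-sending the same request with the last token received. The sketch below is schematic only: it assumes `execute()` on `executeStreamingSql` returns the list of `PartialResultSet` messages, which may not match your transport exactly, and all names are placeholders.

```
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = ("projects/my-project/instances/my-instance/databases/my-db"
           "/sessions/my-session")  # placeholder
body = {"sql": "SELECT SingerId, Name FROM Singers"}

last_token = None
try:
    for partial in sessions.executeStreamingSql(session=session, body=body).execute():
        if partial.get("resumeToken"):
            last_token = partial["resumeToken"]
        # ... consume partial.get("values", []) ...
except Exception:
    # On interruption, re-send the original request plus the last resume token.
    # Running any other transaction in this session would invalidate the token.
    if last_token:
        body["resumeToken"] = last_token
        partials = sessions.executeStreamingSql(session=session, body=body).execute()
```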

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1234,6 +1340,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1322,6 +1429,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1339,6 +1447,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1453,6 +1562,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1470,6 +1580,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1511,6 +1622,10 @@

Method Details

], }, }, + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "rows": [ # Each element in `rows` is a row whose format is defined by metadata.row_type. The ith element in each row matches the ith field in metadata.row_type. Elements are encoded based on type as described here. [ "", @@ -1672,6 +1787,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1689,6 +1805,7 @@

Method Details

"strong": True or False, # Read at a timestamp where all previously committed transactions are visible. }, "readWrite": { # Message type to initiate a read-write transaction. Currently this transaction type has no options. # Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource. + "multiplexedSessionPreviousTransactionId": "A String", # Optional. Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session. "readLockMode": "A String", # Read lock mode for the transaction. }, }, @@ -1731,6 +1848,10 @@

Method Details

], }, }, + "precommitToken": { # When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses. # Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction. + "precommitToken": "A String", # Opaque precommit token. + "seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. + }, "resumeToken": "A String", # Streaming calls might be interrupted for a variety of reasons, such as TCP connection loss. If this occurs, the stream of results can be resumed by re-sending the original request and including `resume_token`. Note that executing any other transaction in the same session invalidates the token. "stats": { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the statement that produced this streaming result set. These can be requested by setting ExecuteSqlRequest.query_mode and are sent only once with the last response in the stream. This field will also be present in the last response for DML statements. "queryPlan": { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result. diff --git a/docs/dyn/spanner_v1.projects.instances.html b/docs/dyn/spanner_v1.projects.instances.html index 40484b2600..67ea3726fe 100644 --- a/docs/dyn/spanner_v1.projects.instances.html +++ b/docs/dyn/spanner_v1.projects.instances.html @@ -189,6 +189,7 @@

Method Details

}, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. + "defaultBackupScheduleType": "A String", # Optional. Controls the default backup behavior for new databases within the instance. Note that `AUTOMATIC` is not permitted for free instances, as backups and backup schedules are not allowed for free instances. In the `GetInstance` or `ListInstances` response, if the value of default_backup_schedule_type is unset or NONE, no default backup schedule will be created for new databases within the instance. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. "edition": "A String", # Optional. The `Edition` of the current instance. "endpointUris": [ # Deprecated. This field is not populated. @@ -204,8 +205,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. The final segment of the name must be between 2 and 64 characters in length. - "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying node count across replicas (achieved by setting asymmetric_autoscaling_options in autoscaling config), the node_count here is the maximum node count across all replicas. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). - "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying processing units per replica (achieved by setting asymmetric_autoscaling_options in autoscaling config), the processing_units here is the maximum processing units across all replicas. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). "replicaComputeCapacity": [ # Output only. Lists the compute capacity per ReplicaSelection. A replica selection identifies a set of replicas with common properties. 
Replicas identified by a ReplicaSelection are scaled with the same compute capacity. { # ReplicaComputeCapacity describes the amount of server resources that are allocated to each replica identified by the replica selection. "nodeCount": 42, # The number of nodes allocated to each replica. This may be zero in API responses for instances that are not yet in state `READY`. @@ -314,6 +315,7 @@
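Since at most one of `node_count` and `processing_units` should be set, resizing an instance typically patches exactly one of them with a matching field mask. A sketch, assuming the `patch` method's `instance`/`fieldMask` request shape (instance name is a placeholder):

```
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
name = "projects/my-project/instances/my-instance"  # placeholder

operation = (
    spanner.projects().instances()
    .patch(
        name=name,
        body={
            "instance": {
                "name": name,
                "processingUnits": 500,   # set only one of processingUnits/nodeCount
            },
            "fieldMask": "processingUnits",
        },
    )
    .execute()
)
```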

Method Details

}, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. + "defaultBackupScheduleType": "A String", # Optional. Controls the default backup behavior for new databases within the instance. Note that `AUTOMATIC` is not permitted for free instances, as backups and backup schedules are not allowed for free instances. In the `GetInstance` or `ListInstances` response, if the value of default_backup_schedule_type is unset or NONE, no default backup schedule will be created for new databases within the instance. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. "edition": "A String", # Optional. The `Edition` of the current instance. "endpointUris": [ # Deprecated. This field is not populated. @@ -329,8 +331,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. The final segment of the name must be between 2 and 64 characters in length. - "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying node count across replicas (achieved by setting asymmetric_autoscaling_options in autoscaling config), the node_count here is the maximum node count across all replicas. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). - "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying processing units per replica (achieved by setting asymmetric_autoscaling_options in autoscaling config), the processing_units here is the maximum processing units across all replicas. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). "replicaComputeCapacity": [ # Output only. Lists the compute capacity per ReplicaSelection. A replica selection identifies a set of replicas with common properties. 
Replicas identified by a ReplicaSelection are scaled with the same compute capacity. { # ReplicaComputeCapacity describes the amount of server resources that are allocated to each replica identified by the replica selection. "nodeCount": 42, # The number of nodes allocated to each replica. This may be zero in API responses for instances that are not yet in state `READY`. @@ -439,6 +441,7 @@

Method Details

}, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. + "defaultBackupScheduleType": "A String", # Optional. Controls the default backup behavior for new databases within the instance. Note that `AUTOMATIC` is not permitted for free instances, as backups and backup schedules are not allowed for free instances. In the `GetInstance` or `ListInstances` response, if the value of default_backup_schedule_type is unset or NONE, no default backup schedule will be created for new databases within the instance. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. "edition": "A String", # Optional. The `Edition` of the current instance. "endpointUris": [ # Deprecated. This field is not populated. @@ -454,8 +457,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. The final segment of the name must be between 2 and 64 characters in length. - "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying node count across replicas (achieved by setting asymmetric_autoscaling_options in autoscaling config), the node_count here is the maximum node count across all replicas. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). - "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying processing units per replica (achieved by setting asymmetric_autoscaling_options in autoscaling config), the processing_units here is the maximum processing units across all replicas. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). "replicaComputeCapacity": [ # Output only. Lists the compute capacity per ReplicaSelection. A replica selection identifies a set of replicas with common properties. 
Replicas identified by a ReplicaSelection are scaled with the same compute capacity. { # ReplicaComputeCapacity describes the amount of server resources that are allocated to each replica identified by the replica selection. "nodeCount": 42, # The number of nodes allocated to each replica. This may be zero in API responses for instances that are not yet in state `READY`. @@ -574,6 +577,7 @@

Method Details

}, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. + "defaultBackupScheduleType": "A String", # Optional. Controls the default backup behavior for new databases within the instance. Note that `AUTOMATIC` is not permitted for free instances, as backups and backup schedules are not allowed for free instances. In the `GetInstance` or `ListInstances` response, if the value of default_backup_schedule_type is unset or NONE, no default backup schedule will be created for new databases within the instance. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. "edition": "A String", # Optional. The `Edition` of the current instance. "endpointUris": [ # Deprecated. This field is not populated. @@ -589,8 +593,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. The final segment of the name must be between 2 and 64 characters in length. - "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying node count across replicas (achieved by setting asymmetric_autoscaling_options in autoscaling config), the node_count here is the maximum node count across all replicas. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). - "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying processing units per replica (achieved by setting asymmetric_autoscaling_options in autoscaling config), the processing_units here is the maximum processing units across all replicas. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "nodeCount": 42, # The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity). + "processingUnits": 42, # The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity). "replicaComputeCapacity": [ # Output only. Lists the compute capacity per ReplicaSelection. A replica selection identifies a set of replicas with common properties. 
Replicas identified by a ReplicaSelection are scaled with the same compute capacity. { # ReplicaComputeCapacity describes the amount of server resources that are allocated to each replica identified by the replica selection. "nodeCount": 42, # The number of nodes allocated to each replica. This may be zero in API responses for instances that are not yet in state `READY`. diff --git a/googleapiclient/discovery_cache/documents/spanner.v1.json b/googleapiclient/discovery_cache/documents/spanner.v1.json index a3e34f2a1e..0698e769ba 100644 --- a/googleapiclient/discovery_cache/documents/spanner.v1.json +++ b/googleapiclient/discovery_cache/documents/spanner.v1.json @@ -1044,7 +1044,7 @@ "type": "string" }, "encryptionConfig.kmsKeyNames": { -"description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", +"description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.", "location": "query", "repeated": true, "type": "string" @@ -1122,7 +1122,7 @@ ] }, "getIamPolicy": { -"description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", +"description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. 
For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:getIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.backups.getIamPolicy", @@ -1228,7 +1228,7 @@ ] }, "setIamPolicy": { -"description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", +"description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:setIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.backups.setIamPolicy", @@ -1257,7 +1257,7 @@ ] }, "testIamPermissions": { -"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", +"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.backups.testIamPermissions", @@ -1425,7 +1425,7 @@ ], "parameters": { "filter": { -"description": "An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for RestoreDatabaseMetadata is `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * `metadata.` - any field in metadata.value. `metadata.@type` must be specified first, if filtering on metadata fields. 
* `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \\ `(metadata.source_type:BACKUP) AND` \\ `(metadata.backup_info.backup:backup_howl) AND` \\ `(metadata.name:restored_howl) AND` \\ `(metadata.progress.start_time < \\\"2018-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Return operations where: * The operation's metadata type is RestoreDatabaseMetadata. * The database is restored from a backup. * The backup name contains \"backup_howl\". * The restored database's name contains \"restored_howl\". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.", +"description": "An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for RestoreDatabaseMetadata is `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * `metadata.` - any field in metadata.value. `metadata.@type` must be specified first, if filtering on metadata fields. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \\ `(metadata.source_type:BACKUP) AND` \\ `(metadata.backup_info.backup:backup_howl) AND` \\ `(metadata.name:restored_howl) AND` \\ `(metadata.progress.start_time < \\\"2018-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Return operations where: * The operation's metadata type is RestoreDatabaseMetadata. * The database is restored from a backup. * The backup name contains \"backup_howl\". * The restored database's name contains \"restored_howl\". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.", "location": "query", "type": "string" }, @@ -1598,7 +1598,7 @@ ] }, "getIamPolicy": { -"description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", +"description": "Gets the access control policy for a database or backup resource. 
Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:getIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.databases.getIamPolicy", @@ -1781,7 +1781,7 @@ ] }, "setIamPolicy": { -"description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", +"description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:setIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.databases.setIamPolicy", @@ -1810,7 +1810,7 @@ ] }, "testIamPermissions": { -"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", +"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.databases.testIamPermissions", @@ -1958,7 +1958,7 @@ ] }, "getIamPolicy": { -"description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", +"description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. 
For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.getIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}:getIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.databases.backupSchedules.getIamPolicy", @@ -2059,7 +2059,7 @@ ] }, "setIamPolicy": { -"description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", +"description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource. For backup schedules, authorization requires `spanner.backupSchedules.setIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}:setIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.databases.backupSchedules.setIamPolicy", @@ -2088,7 +2088,7 @@ ] }, "testIamPermissions": { -"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", +"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.databases.backupSchedules.testIamPermissions", @@ -2158,7 +2158,7 @@ ] }, "testIamPermissions": { -"description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", +"description": "Returns permissions that the caller has on the specified database or backup resource. 
Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance. Calling this method on a backup schedule that does not exist will result in a NOT_FOUND error if the user has `spanner.backupSchedules.list` permission on the containing database.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/databaseRoles/{databaseRolesId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.databases.databaseRoles.testIamPermissions", @@ -3319,7 +3319,7 @@ } } }, -"revision": "20241018", +"revision": "20241020", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "AsymmetricAutoscalingOption": { @@ -3462,7 +3462,7 @@ "readOnly": true }, "encryptionInformation": { -"description": "Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined.", +"description": "Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined.", "items": { "$ref": "EncryptionInfo" }, @@ -3706,6 +3706,10 @@ "description": "The request for BeginTransaction.", "id": "BeginTransactionRequest", "properties": { +"mutationKey": { +"$ref": "Mutation", +"description": "Optional. Required for read-write transactions on a multiplexed session that commit mutations but do not perform any reads or queries. Clients should randomly select one of the mutations from the mutation set and send it as a part of this request." +}, "options": { "$ref": "TransactionOptions", "description": "Required. Options for the new transaction." @@ -3815,6 +3819,10 @@ }, "type": "array" }, +"precommitToken": { +"$ref": "MultiplexedSessionPrecommitToken", +"description": "Optional. If the read-write transaction was executed on a multiplexed session, the precommit token with the highest sequence number received in this transaction attempt, should be included here. Failing to do so will result in a FailedPrecondition error." +}, "requestOptions": { "$ref": "RequestOptions", "description": "Common options for this request." @@ -3847,6 +3855,10 @@ "description": "The Cloud Spanner timestamp at which the transaction committed.", "format": "google-datetime", "type": "string" +}, +"precommitToken": { +"$ref": "MultiplexedSessionPrecommitToken", +"description": "If specified, transaction has not committed yet. Clients must retry the commit with the new precommit token." } }, "type": "object" @@ -3926,7 +3938,7 @@ "type": "string" }, "kmsKeyNames": { -"description": "Optional. 
Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. Kms keys specified can be in any order. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", +"description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. KMS keys specified can be in any order. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.", "items": { "type": "string" }, @@ -4008,7 +4020,7 @@ "type": "string" }, "kmsKeyNames": { -"description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", +"description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. 
Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.", "items": { "type": "string" }, @@ -4307,7 +4319,7 @@ "readOnly": true }, "encryptionInfo": { -"description": "Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field.", +"description": "Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status` field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field.", "items": { "$ref": "EncryptionInfo" }, @@ -4498,7 +4510,7 @@ "type": "string" }, "kmsKeyNames": { -"description": "Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", +"description": "Specifies the KMS configuration for one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.", "items": { "type": "string" }, @@ -4584,6 +4596,10 @@ "description": "The response for ExecuteBatchDml. Contains a list of ResultSet messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the response body identifies the cause of the failure. To check for DML statements that failed, use the following approach: 1. Check the status in the response message. 
The google.rpc.Code enum value `OK` indicates that all statements were executed successfully. 2. If the status was not `OK`, check the number of result sets in the response. If the response contains `N` ResultSet messages, then statement `N+1` in the request failed. Example 1: * Request: 5 DML statements, all executed successfully. * Response: 5 ResultSet messages, with the status `OK`. Example 2: * Request: 5 DML statements. The third statement has a syntax error. * Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`) status. The number of ResultSet messages indicates that the third statement failed, and the fourth and fifth statements were not executed.", "id": "ExecuteBatchDmlResponse", "properties": { +"precommitToken": { +"$ref": "MultiplexedSessionPrecommitToken", +"description": "Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction." +}, "resultSets": { "description": "One ResultSet for each statement in the request that ran successfully, in the same order as the statements in the request. Each ResultSet does not contain any rows. The ResultSetStats in each ResultSet contain the number of rows modified by the statement. Only the first ResultSet in the response contains valid ResultSetMetadata.", "items": { @@ -4886,6 +4902,20 @@ "readOnly": true, "type": "string" }, +"defaultBackupScheduleType": { +"description": "Optional. Controls the default backup behavior for new databases within the instance. Note that `AUTOMATIC` is not permitted for free instances, as backups and backup schedules are not allowed for free instances. In the `GetInstance` or `ListInstances` response, if the value of default_backup_schedule_type is unset or NONE, no default backup schedule will be created for new databases within the instance.", +"enum": [ +"DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED", +"NONE", +"AUTOMATIC" +], +"enumDescriptions": [ +"Not specified.", +"No default backup schedule will be created automatically on creation of a database within the instance.", +"A default backup schedule will be created automatically on creation of a database within the instance. The default backup schedule creates a full backup every 24 hours and retains the backup for a period of 7 days. Once created, the default backup schedule can be edited/deleted similar to any other backup schedule." +], +"type": "string" +}, "displayName": { "description": "Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length.", "type": "string" @@ -4943,12 +4973,12 @@ "type": "string" }, "nodeCount": { -"description": "The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying node count across replicas (achieved by setting asymmetric_autoscaling_options in autoscaling config), the node_count here is the maximum node count across all replicas. 
For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity).", +"description": "The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity).", "format": "int32", "type": "integer" }, "processingUnits": { -"description": "The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. If the instance has varying processing units per replica (achieved by setting asymmetric_autoscaling_options in autoscaling config), the processing_units here is the maximum processing units across all replicas. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity).", +"description": "The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity).", "format": "int32", "type": "integer" }, @@ -5905,6 +5935,10 @@ "$ref": "ResultSetMetadata", "description": "Metadata about the result set, such as row type information. Only present in the first response." }, +"precommitToken": { +"$ref": "MultiplexedSessionPrecommitToken", +"description": "Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction." +}, "resumeToken": { "description": "Streaming calls might be interrupted for a variety of reasons, such as TCP connection loss. If this occurs, the stream of results can be resumed by re-sending the original request and including `resume_token`. Note that executing any other transaction in the same session invalidates the token.", "format": "byte", @@ -6380,6 +6414,11 @@ "description": "Message type to initiate a read-write transaction. Currently this transaction type has no options.", "id": "ReadWrite", "properties": { +"multiplexedSessionPreviousTransactionId": { +"description": "Optional. 
Clients should pass the transaction ID of the previous transaction attempt that was aborted if this transaction is being executed on a multiplexed session.", +"format": "byte", +"type": "string" +}, "readLockMode": { "description": "Read lock mode for the transaction.", "enum": [ @@ -6529,7 +6568,7 @@ "type": "string" }, "kmsKeyNames": { -"description": "Optional. Specifies the KMS configuration for the one or more keys used to encrypt the database. Values have the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configurations, specify a single regional location KMS key. * For multi-regional database instance configurations of type `GOOGLE_MANAGED`, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For a database instance configuration of type `USER_MANAGED`, please specify only regional location KMS keys to cover each region in the instance configuration. Multi-regional location KMS keys are not supported for USER_MANAGED instance configurations.", +"description": "Optional. Specifies the KMS configuration for one or more keys used to encrypt the database. Values have the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by `kms_key_names` must fully cover all regions of the database's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.", "items": { "type": "string" }, @@ -6628,6 +6667,10 @@ "$ref": "ResultSetMetadata", "description": "Metadata about the result set, such as row type information." }, +"precommitToken": { +"$ref": "MultiplexedSessionPrecommitToken", +"description": "Optional. A precommit token will be included if the read-write transaction is on a multiplexed session. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction." +}, "rows": { "description": "Each element in `rows` is a row whose format is defined by metadata.row_type. The ith element in each row matches the ith field in metadata.row_type. Elements are encoded based on type as described here.", "items": { @@ -7122,7 +7165,7 @@ "id": "UpdateDatabaseDdlRequest", "properties": { "operationId": { -"description": "If empty, the new update request is assigned an automatically-generated operation ID. Otherwise, `operation_id` is used to construct the name of the resulting Operation. Specifying an explicit operation ID simplifies determining whether the statements were executed in the event that the UpdateDatabaseDdl call is replayed, or the return value is otherwise lost: the database and `operation_id` fields can be combined to form the name of the resulting longrunning.Operation: `/operations/`. `operation_id` should be unique within the database, and must be a valid identifier: `a-z*`. 
Note that automatically-generated operation IDs always begin with an underscore. If the named operation already exists, UpdateDatabaseDdl returns `ALREADY_EXISTS`.", +"description": "If empty, the new update request is assigned an automatically-generated operation ID. Otherwise, `operation_id` is used to construct the name of the resulting Operation. Specifying an explicit operation ID simplifies determining whether the statements were executed in the event that the UpdateDatabaseDdl call is replayed, or the return value is otherwise lost: the database and `operation_id` fields can be combined to form the `name` of the resulting longrunning.Operation: `/operations/`. `operation_id` should be unique within the database, and must be a valid identifier: `a-z*`. Note that automatically-generated operation IDs always begin with an underscore. If the named operation already exists, UpdateDatabaseDdl returns `ALREADY_EXISTS`.", "type": "string" }, "protoDescriptors": { From 93d4fe8f6f7b366141b8ccdadaf00b4a6729f2b3 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 16/18] feat(speech): update the api #### speech:v1 The following keys were added: - endpoints (Total Keys: 1) #### speech:v1p1beta1 The following keys were added: - endpoints (Total Keys: 1) --- .../discovery_cache/documents/speech.v1.json | 19 ++++++++++++++++++- .../documents/speech.v1p1beta1.json | 19 ++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/googleapiclient/discovery_cache/documents/speech.v1.json b/googleapiclient/discovery_cache/documents/speech.v1.json index d78bcb7ae0..0d51812c5d 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1.json @@ -15,6 +15,23 @@ "description": "Converts audio to text by applying powerful neural network models.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/speech-to-text/docs/quickstart-protocol", +"endpoints": [ +{ +"description": "Regional Endpoint", +"endpointUrl": "https://speech.us-central1.rep.googleapis.com/", +"location": "us-central1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://speech.us-west1.rep.googleapis.com/", +"location": "us-west1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://speech.me-west1.rep.googleapis.com/", +"location": "me-west1" +} +], "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", @@ -524,7 +541,7 @@ } } }, -"revision": "20240926", +"revision": "20241024", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ABNFGrammar": { diff --git a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json index 0a3d81c938..3754ce7278 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json @@ -15,6 +15,23 @@ "description": "Converts audio to text by applying powerful neural network models.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/speech-to-text/docs/quickstart-protocol", +"endpoints": [ +{ +"description": "Regional Endpoint", +"endpointUrl": "https://speech.us-central1.rep.googleapis.com/", +"location": "us-central1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://speech.us-west1.rep.googleapis.com/", +"location": "us-west1" +}, +{ +"description": "Regional 
Endpoint", +"endpointUrl": "https://speech.me-west1.rep.googleapis.com/", +"location": "me-west1" +} +], "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", @@ -524,7 +541,7 @@ } } }, -"revision": "20240926", +"revision": "20241024", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ABNFGrammar": { From 878a1cefd3e88c3c9736ae394f2133c87deb6311 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Wed, 30 Oct 2024 20:33:48 +0000 Subject: [PATCH 17/18] fix(workspaceevents): update the api #### workspaceevents:v1 The following keys were changed: - resources.operations.methods.get.scopes (Total Keys: 1) - resources.subscriptions.methods.create.scopes (Total Keys: 1) - resources.subscriptions.methods.delete.scopes (Total Keys: 1) - resources.subscriptions.methods.get.scopes (Total Keys: 1) - resources.subscriptions.methods.list.scopes (Total Keys: 1) - resources.subscriptions.methods.patch.scopes (Total Keys: 1) - resources.subscriptions.methods.reactivate.scopes (Total Keys: 1) --- .../dyn/workspaceevents_v1.subscriptions.html | 4 +- .../documents/workspaceevents.v1.json | 54 ++++++++++++++++++- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/docs/dyn/workspaceevents_v1.subscriptions.html b/docs/dyn/workspaceevents_v1.subscriptions.html index 2e03887c06..395286e965 100644 --- a/docs/dyn/workspaceevents_v1.subscriptions.html +++ b/docs/dyn/workspaceevents_v1.subscriptions.html @@ -97,7 +97,7 @@

Instance Methods

[Developer Preview](https://developers.google.com/workspace/preview): Updates or renews a Google Workspace subscription. To learn how to use this method, see [Update or renew a Google Workspace subscription](https://developers.google.com/workspace/events/guides/update-subscription).

reactivate(name, body=None, x__xgafv=None)

-

[Developer Preview](https://developers.google.com/workspace/preview): Reactivates a suspended Google Workspace subscription. This method resets your subscription's `State` field to `ACTIVE`. Before you use this method, you must fix the error that suspended the subscription. To learn how to use this method, see [Reactivate a Google Workspace subscription](https://developers.google.com/workspace/events/guides/reactivate-subscription).

+

[Developer Preview](https://developers.google.com/workspace/preview): Reactivates a suspended Google Workspace subscription. This method resets your subscription's `State` field to `ACTIVE`. Before you use this method, you must fix the error that suspended the subscription. This method will ignore or reject any subscription that isn't currently in a suspended state. To learn how to use this method, see [Reactivate a Google Workspace subscription](https://developers.google.com/workspace/events/guides/reactivate-subscription).

Method Details

close() @@ -373,7 +373,7 @@

Method Details

reactivate(name, body=None, x__xgafv=None) -
[Developer Preview](https://developers.google.com/workspace/preview): Reactivates a suspended Google Workspace subscription. This method resets your subscription's `State` field to `ACTIVE`. Before you use this method, you must fix the error that suspended the subscription. To learn how to use this method, see [Reactivate a Google Workspace subscription](https://developers.google.com/workspace/events/guides/reactivate-subscription).
+  
[Developer Preview](https://developers.google.com/workspace/preview): Reactivates a suspended Google Workspace subscription. This method resets your subscription's `State` field to `ACTIVE`. Before you use this method, you must fix the error that suspended the subscription. This method will ignore or reject any subscription that isn't currently in a suspended state. To learn how to use this method, see [Reactivate a Google Workspace subscription](https://developers.google.com/workspace/events/guides/reactivate-subscription).
 
 Args:
   name: string, Required. Resource name of the subscription. Format: `subscriptions/{subscription}` (required)
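
A minimal sketch of calling the reactivate method whose description is updated above, using the generated Python client, is shown below; the credentials, scope, and subscription name are placeholders and are not taken from this change.

# Sketch only: reactivate a suspended Google Workspace subscription with the
# dynamic discovery client. The scope and subscription name are placeholders.
import google.auth
from googleapiclient.discovery import build

creds, _ = google.auth.default(
    scopes=["https://www.googleapis.com/auth/meetings.space.created"]
)
service = build("workspaceevents", "v1", credentials=creds)

# reactivate() returns a long-running Operation resource.
operation = service.subscriptions().reactivate(
    name="subscriptions/EXAMPLE_SUBSCRIPTION_ID", body={}
).execute()
print(operation)
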
diff --git a/googleapiclient/discovery_cache/documents/workspaceevents.v1.json b/googleapiclient/discovery_cache/documents/workspaceevents.v1.json
index 0ded64facc..8fb95ea46d 100644
--- a/googleapiclient/discovery_cache/documents/workspaceevents.v1.json
+++ b/googleapiclient/discovery_cache/documents/workspaceevents.v1.json
@@ -29,6 +29,21 @@
 "https://www.googleapis.com/auth/chat.spaces.readonly": {
 "description": "View chat and spaces in Google Chat"
 },
+"https://www.googleapis.com/auth/drive": {
+"description": "See, edit, create, and delete all of your Google Drive files"
+},
+"https://www.googleapis.com/auth/drive.file": {
+"description": "See, edit, create, and delete only the specific Google Drive files you use with this app"
+},
+"https://www.googleapis.com/auth/drive.metadata": {
+"description": "View and manage metadata of files in your Google Drive"
+},
+"https://www.googleapis.com/auth/drive.metadata.readonly": {
+"description": "See information about your Google Drive files"
+},
+"https://www.googleapis.com/auth/drive.readonly": {
+"description": "See and download all your Google Drive files"
+},
 "https://www.googleapis.com/auth/meetings.space.created": {
 "description": "Create, edit, and see information about your Google Meet conferences created by the app."
 },
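
The hunk above adds Drive scopes to the OAuth scope definitions; the sketch below, which assumes google-auth-oauthlib is installed and uses a placeholder client secrets file, shows one way such a scope could be requested when authorizing a Workspace Events client.

# Sketch only: request one of the newly listed Drive scopes during end-user
# authorization. "client_secret.json" is a placeholder path.
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build

SCOPES = ["https://www.googleapis.com/auth/drive.metadata.readonly"]
flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", SCOPES)
creds = flow.run_local_server(port=0)

service = build("workspaceevents", "v1", credentials=creds)
# The authorized client can then manage subscriptions that target Drive resources.
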
@@ -168,6 +183,11 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
@@ -205,6 +225,11 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
@@ -255,6 +280,11 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
@@ -290,6 +320,11 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
@@ -332,6 +367,11 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
@@ -380,12 +420,17 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
 },
 "reactivate": {
-"description": "[Developer Preview](https://developers.google.com/workspace/preview): Reactivates a suspended Google Workspace subscription. This method resets your subscription's `State` field to `ACTIVE`. Before you use this method, you must fix the error that suspended the subscription. To learn how to use this method, see [Reactivate a Google Workspace subscription](https://developers.google.com/workspace/events/guides/reactivate-subscription).",
+"description": "[Developer Preview](https://developers.google.com/workspace/preview): Reactivates a suspended Google Workspace subscription. This method resets your subscription's `State` field to `ACTIVE`. Before you use this method, you must fix the error that suspended the subscription. This method will ignore or reject any subscription that isn't currently in a suspended state. To learn how to use this method, see [Reactivate a Google Workspace subscription](https://developers.google.com/workspace/events/guides/reactivate-subscription).",
 "flatPath": "v1/subscriptions/{subscriptionsId}:reactivate",
 "httpMethod": "POST",
 "id": "workspaceevents.subscriptions.reactivate",
@@ -417,6 +462,11 @@
 "https://www.googleapis.com/auth/chat.messages.readonly",
 "https://www.googleapis.com/auth/chat.spaces",
 "https://www.googleapis.com/auth/chat.spaces.readonly",
+"https://www.googleapis.com/auth/drive",
+"https://www.googleapis.com/auth/drive.file",
+"https://www.googleapis.com/auth/drive.metadata",
+"https://www.googleapis.com/auth/drive.metadata.readonly",
+"https://www.googleapis.com/auth/drive.readonly",
 "https://www.googleapis.com/auth/meetings.space.created",
 "https://www.googleapis.com/auth/meetings.space.readonly"
 ]
@@ -424,7 +474,7 @@
 }
 }
 },
-"revision": "20241008",
+"revision": "20241027",
 "rootUrl": "https://workspaceevents.googleapis.com/",
 "schemas": {
 "ListSubscriptionsResponse": {

From 2a4927ed5fb30b4e83d32242c30cd9e58575d5be Mon Sep 17 00:00:00 2001
From: Yoshi Automation 
Date: Wed, 30 Oct 2024 20:33:48 +0000
Subject: [PATCH 18/18] chore(docs): Add new discovery artifacts and artifacts
 with minor updates

---
 docs/dyn/androidenterprise_v1.devices.html    |    8 +-
 ....projects.locations.workflowTemplates.html |   14 +-
 .../dataproc_v1.projects.regions.jobs.html    |   16 +-
 ...v1.projects.regions.workflowTemplates.html |   14 +-
 ...firebase_v1beta1.projects.androidApps.html |   12 +-
 docs/dyn/firebase_v1beta1.projects.html       |    2 +-
 .../firebase_v1beta1.projects.iosApps.html    |   12 +-
 .../firebase_v1beta1.projects.webApps.html    |   12 +-
 docs/dyn/iam_v1.locations.workforcePools.html |   36 +-
 ...v1.locations.workforcePools.providers.html |   72 +-
 ...unts_v1beta.accounts.shippingSettings.html |   12 +-
 docs/dyn/osconfig_v2beta.folders.html         |   91 +
 ...nfig_v2beta.folders.locations.global_.html |   91 +
 ...locations.global_.policyOrchestrators.html | 1468 +++++++++++
 .../osconfig_v2beta.folders.locations.html    |   96 +
 ...g_v2beta.folders.locations.operations.html |  235 ++
 docs/dyn/osconfig_v2beta.html                 |  121 +
 docs/dyn/osconfig_v2beta.organizations.html   |   91 +
 ...2beta.organizations.locations.global_.html |   91 +
 ...locations.global_.policyOrchestrators.html | 1468 +++++++++++
 ...config_v2beta.organizations.locations.html |   96 +
 ...ta.organizations.locations.operations.html |  235 ++
 docs/dyn/osconfig_v2beta.projects.html        |   91 +
 ...fig_v2beta.projects.locations.global_.html |   91 +
 ...locations.global_.policyOrchestrators.html | 1468 +++++++++++
 .../osconfig_v2beta.projects.locations.html   |   96 +
 ..._v2beta.projects.locations.operations.html |  235 ++
 .../documents/alertcenter.v1beta1.json        |   16 +-
 .../documents/androidenterprise.v1.json       |    4 +-
 .../documents/dataproc.v1.json                |    6 +-
 .../documents/firebase.v1beta1.json           |   16 +-
 .../discovery_cache/documents/iam.v1.json     |   30 +-
 .../merchantapi.accounts_v1beta.json          |    6 +-
 .../documents/osconfig.v2beta.json            | 2274 +++++++++++++++++
 34 files changed, 8483 insertions(+), 143 deletions(-)
 create mode 100644 docs/dyn/osconfig_v2beta.folders.html
 create mode 100644 docs/dyn/osconfig_v2beta.folders.locations.global_.html
 create mode 100644 docs/dyn/osconfig_v2beta.folders.locations.global_.policyOrchestrators.html
 create mode 100644 docs/dyn/osconfig_v2beta.folders.locations.html
 create mode 100644 docs/dyn/osconfig_v2beta.folders.locations.operations.html
 create mode 100644 docs/dyn/osconfig_v2beta.html
 create mode 100644 docs/dyn/osconfig_v2beta.organizations.html
 create mode 100644 docs/dyn/osconfig_v2beta.organizations.locations.global_.html
 create mode 100644 docs/dyn/osconfig_v2beta.organizations.locations.global_.policyOrchestrators.html
 create mode 100644 docs/dyn/osconfig_v2beta.organizations.locations.html
 create mode 100644 docs/dyn/osconfig_v2beta.organizations.locations.operations.html
 create mode 100644 docs/dyn/osconfig_v2beta.projects.html
 create mode 100644 docs/dyn/osconfig_v2beta.projects.locations.global_.html
 create mode 100644 docs/dyn/osconfig_v2beta.projects.locations.global_.policyOrchestrators.html
 create mode 100644 docs/dyn/osconfig_v2beta.projects.locations.html
 create mode 100644 docs/dyn/osconfig_v2beta.projects.locations.operations.html
 create mode 100644 googleapiclient/discovery_cache/documents/osconfig.v2beta.json
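Since this patch adds the osconfig v2beta discovery artifact and its generated docs, the new surface can be built like any other service. A minimal sketch (not from this patch; it assumes Application Default Credentials, and the resource path mirrors the generated doc file names listed above):

```python
from googleapiclient.discovery import build

# Builds the client from the newly added osconfig.v2beta.json discovery document.
osconfig = build("osconfig", "v2beta")

# v2beta exposes policyOrchestrators under projects, folders, and organizations;
# "global_" follows the naming used by the generated docs for the global location.
orchestrators = osconfig.projects().locations().global_().policyOrchestrators()
```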

diff --git a/docs/dyn/androidenterprise_v1.devices.html b/docs/dyn/androidenterprise_v1.devices.html
index 4b7bb4a18f..d530f6ca93 100644
--- a/docs/dyn/androidenterprise_v1.devices.html
+++ b/docs/dyn/androidenterprise_v1.devices.html
@@ -140,7 +140,7 @@ Method Details
 "managementType": "A String", # Identifies the extent to which the device is controlled by a managed Google Play EMM in various deployment configurations. Possible values include: - "managedDevice", a device that has the EMM's device policy controller (DPC) as the device owner. - "managedProfile", a device that has a profile managed by the DPC (DPC is profile owner) in addition to a separate, personal profile that is unavailable to the DPC. - "containerApp", no longer used (deprecated). - "unmanagedProfile", a device that has been allowed (by the domain's admin, using the Admin Console to enable the privilege) to use managed Google Play, but the profile is itself not owned by a DPC.
 "model": "A String", # The model name of the device. This comes from android.os.Build.MODEL.
 "policy": { # The device policy for a given managed device. # The policy enforced on the device.
-"autoUpdatePolicy": "A String", # Controls when automatic app updates on the device can be applied. Recommended alternative: autoUpdateMode which is set per app, provides greater flexibility around update frequency. When autoUpdateMode is set to AUTO_UPDATE_POSTPONED or AUTO_UPDATE_HIGH_PRIORITY, autoUpdatePolicy has no effect. "choiceToTheUser" allows the device's user to configure the app update policy. "always" enables auto updates. "never" disables auto updates. "wifiOnly" enables auto updates only when the device is connected to wifi.
+"autoUpdatePolicy": "A String", # Controls when automatic app updates on the device can be applied. Recommended alternative: autoUpdateMode which is set per app, provides greater flexibility around update frequency. When autoUpdateMode is set to AUTO_UPDATE_POSTPONED or AUTO_UPDATE_HIGH_PRIORITY, autoUpdatePolicy has no effect. - choiceToTheUser allows the device's user to configure the app update policy. - always enables auto updates. - never disables auto updates. - wifiOnly enables auto updates only when the device is connected to wifi. *Important:* Changes to app update policies don't affect updates that are in progress. Any policy changes will apply to subsequent app updates.
 "deviceReportPolicy": "A String", # Whether the device reports app states to the EMM. The default value is "deviceReportDisabled".
 "maintenanceWindow": { # Maintenance window for managed Google Play Accounts. This allows Play store to update the apps on the foreground in the designated window. # The maintenance window defining when apps running in the foreground should be updated.
 "durationMs": "A String", # Duration of the maintenance window, in milliseconds. The duration must be between 30 minutes and 24 hours (inclusive).
 [the same autoUpdatePolicy change appears in each of the following hunks]
@@ -283,7 +283,7 @@ Method Details
@@ -429,7 +429,7 @@ Method Details
@@ -542,7 +542,7 @@ Method Details
diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
index e35839107f..e58da1852c 100644
--- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
+++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
@@ -245,7 +245,7 @@ Method Details
 ],
 },
 },
-"pysparkJob": { # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Optional. Job is a PySpark job.
+"pysparkJob": { # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/latest/api/python/index.html#pyspark-overview) applications on YARN. # Optional. Job is a PySpark job.
 "archiveUris": [ # Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
 "A String",
 ],
 [the same pysparkJob documentation-link change appears in each of the following hunks]
@@ -947,7 +947,7 @@ Method Details
@@ -1676,7 +1676,7 @@ Method Details
@@ -2469,7 +2469,7 @@ Method Details
@@ -3212,7 +3212,7 @@ Method Details
@@ -4023,7 +4023,7 @@ Method Details
@@ -4725,7 +4725,7 @@ Method Details
diff --git a/docs/dyn/dataproc_v1.projects.regions.jobs.html b/docs/dyn/dataproc_v1.projects.regions.jobs.html
index c646c05ff2..a1a321e9bc 100644
--- a/docs/dyn/dataproc_v1.projects.regions.jobs.html
+++ b/docs/dyn/dataproc_v1.projects.regions.jobs.html
@@ -257,7 +257,7 @@ Method Details
 ],
 },
 },
-"pysparkJob": { # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Optional. Job is a PySpark job.
+"pysparkJob": { # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/latest/api/python/index.html#pyspark-overview) applications on YARN. # Optional. Job is a PySpark job.
 "archiveUris": [ # Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
 "A String",
 ],
 [the same pysparkJob documentation-link change appears in each of the following hunks]
@@ -568,7 +568,7 @@ Method Details
@@ -907,7 +907,7 @@ Method Details
@@ -1208,7 +1208,7 @@ Method Details
@@ -1486,7 +1486,7 @@ Method Details
@@ -1825,7 +1825,7 @@ Method Details
@@ -2104,7 +2104,7 @@ Method Details
@@ -2385,7 +2385,7 @@ Method Details
diff --git a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html
index 938a565202..33f5a6b8ba 100644
--- a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html
+++ b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html
@@ -245,7 +245,7 @@ Method Details
 ],
 },
 },
-"pysparkJob": { # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Optional. Job is a PySpark job.
+"pysparkJob": { # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/latest/api/python/index.html#pyspark-overview) applications on YARN. # Optional. Job is a PySpark job.
 "archiveUris": [ # Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
 "A String",
 ],
 [the same pysparkJob documentation-link change appears in each of the following hunks]
@@ -947,7 +947,7 @@ Method Details
@@ -1676,7 +1676,7 @@ Method Details
@@ -2469,7 +2469,7 @@ Method Details
@@ -3212,7 +3212,7 @@ Method Details
@@ -4023,7 +4023,7 @@ Method Details
@@ -4725,7 +4725,7 @@ Method Details
diff --git a/docs/dyn/firebase_v1beta1.projects.androidApps.html b/docs/dyn/firebase_v1beta1.projects.androidApps.html
index f868e30192..9f34fa22ac 100644
--- a/docs/dyn/firebase_v1beta1.projects.androidApps.html
+++ b/docs/dyn/firebase_v1beta1.projects.androidApps.html
@@ -126,7 +126,7 @@ Method Details
"appId": "A String", # Output only. Immutable. The globally unique, Firebase-assigned identifier for the `AndroidApp`. This identifier should be treated as an opaque token, as the data format is not specified. "displayName": "A String", # The user-assigned display name for the `AndroidApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the AndroidApp, in the format: projects/ PROJECT_IDENTIFIER/androidApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.androidApps#AndroidApp.FIELDS.app_id)). "packageName": "A String", # Immutable. The canonical package name of the Android app as would appear in the Google Play Developer Console. "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `AndroidApp`. @@ -187,7 +187,7 @@

Method Details

"appId": "A String", # Output only. Immutable. The globally unique, Firebase-assigned identifier for the `AndroidApp`. This identifier should be treated as an opaque token, as the data format is not specified. "displayName": "A String", # The user-assigned display name for the `AndroidApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the AndroidApp, in the format: projects/ PROJECT_IDENTIFIER/androidApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.androidApps#AndroidApp.FIELDS.app_id)). "packageName": "A String", # Immutable. The canonical package name of the Android app as would appear in the Google Play Developer Console. "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `AndroidApp`. @@ -245,7 +245,7 @@

Method Details

"appId": "A String", # Output only. Immutable. The globally unique, Firebase-assigned identifier for the `AndroidApp`. This identifier should be treated as an opaque token, as the data format is not specified. "displayName": "A String", # The user-assigned display name for the `AndroidApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the AndroidApp, in the format: projects/ PROJECT_IDENTIFIER/androidApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.androidApps#AndroidApp.FIELDS.app_id)). "packageName": "A String", # Immutable. The canonical package name of the Android app as would appear in the Google Play Developer Console. "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `AndroidApp`. @@ -290,7 +290,7 @@

Method Details

"appId": "A String", # Output only. Immutable. The globally unique, Firebase-assigned identifier for the `AndroidApp`. This identifier should be treated as an opaque token, as the data format is not specified. "displayName": "A String", # The user-assigned display name for the `AndroidApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the AndroidApp, in the format: projects/ PROJECT_IDENTIFIER/androidApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.androidApps#AndroidApp.FIELDS.app_id)). "packageName": "A String", # Immutable. The canonical package name of the Android app as would appear in the Google Play Developer Console. "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `AndroidApp`. @@ -317,7 +317,7 @@

Method Details

"appId": "A String", # Output only. Immutable. The globally unique, Firebase-assigned identifier for the `AndroidApp`. This identifier should be treated as an opaque token, as the data format is not specified. "displayName": "A String", # The user-assigned display name for the `AndroidApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the AndroidApp, in the format: projects/ PROJECT_IDENTIFIER/androidApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.androidApps#AndroidApp.FIELDS.app_id)). "packageName": "A String", # Immutable. The canonical package name of the Android app as would appear in the Google Play Developer Console. "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `AndroidApp`. @@ -343,7 +343,7 @@

Method Details

{ "allowMissing": True or False, # If set to true, and the App is not found, the request will succeed but no action will be taken on the server. "etag": "A String", # Checksum provided in the AndroidApp resource. If provided, this checksum ensures that the client has an up-to-date value before proceeding. - "immediate": True or False, # Determines whether to _immediately_ delete the AndroidApp. If set to true, the App is immediately deleted from the Project and cannot be restored to the Project. If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteAndroidApp. + "immediate": True or False, # Determines whether to _immediately_ delete the AndroidApp. If set to true, the App is immediately deleted from the Project and cannot be undeleted (that is, restored to the Project). If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteAndroidApp. "validateOnly": True or False, # If set to true, the request is only validated. The App will _not_ be removed. } diff --git a/docs/dyn/firebase_v1beta1.projects.html b/docs/dyn/firebase_v1beta1.projects.html index a6bbd64396..9fd3502471 100644 --- a/docs/dyn/firebase_v1beta1.projects.html +++ b/docs/dyn/firebase_v1beta1.projects.html @@ -469,7 +469,7 @@

Method Details

"apiKeyId": "A String", # The globally unique, Google-assigned identifier (UID) for the Firebase API key associated with the App. Be aware that this value is the UID of the API key, _not_ the [`keyString`](https://cloud.google.com/api-keys/docs/reference/rest/v2/projects.locations.keys#Key.FIELDS.key_string) of the API key. The `keyString` is the value that can be found in the App's configuration artifact ([`AndroidApp`](../../rest/v1beta1/projects.androidApps/getConfig) | [`IosApp`](../../rest/v1beta1/projects.iosApps/getConfig) | [`WebApp`](../../rest/v1beta1/projects.webApps/getConfig)). If `api_key_id` is not set in requests to create the App ([`AndroidApp`](../../rest/v1beta1/projects.androidApps/create) | [`IosApp`](../../rest/v1beta1/projects.iosApps/create) | [`WebApp`](../../rest/v1beta1/projects.webApps/create)), then Firebase automatically associates an `api_key_id` with the App. This auto-associated key may be an existing valid key or, if no valid key exists, a new one will be provisioned. "appId": "A String", # Output only. Immutable. The globally unique, Firebase-assigned identifier for the `WebApp`. This identifier should be treated as an opaque token, as the data format is not specified. "displayName": "A String", # The user-assigned display name of the Firebase App. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the Firebase App, in the format: projects/PROJECT_ID /iosApps/APP_ID or projects/PROJECT_ID/androidApps/APP_ID or projects/ PROJECT_ID/webApps/APP_ID "namespace": "A String", # Output only. Immutable. The platform-specific identifier of the App. *Note:* For most use cases, use `appId`, which is the canonical, globally unique identifier for referencing an App. This string is derived from a native identifier for each platform: `packageName` for an `AndroidApp`, `bundleId` for an `IosApp`, and `webId` for a `WebApp`. Its contents should be treated as opaque, as the native identifier format may change as platforms evolve. This string is only unique within a `FirebaseProject` and its associated Apps. "platform": "A String", # The platform of the Firebase App. diff --git a/docs/dyn/firebase_v1beta1.projects.iosApps.html b/docs/dyn/firebase_v1beta1.projects.iosApps.html index d8d450fbe3..100ecb1502 100644 --- a/docs/dyn/firebase_v1beta1.projects.iosApps.html +++ b/docs/dyn/firebase_v1beta1.projects.iosApps.html @@ -123,7 +123,7 @@

Method Details

"bundleId": "A String", # Immutable. The canonical bundle ID of the iOS app as it would appear in the iOS AppStore. "displayName": "A String", # The user-assigned display name for the `IosApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the IosApp, in the format: projects/PROJECT_IDENTIFIER /iosApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.iosApps#IosApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `IosApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -180,7 +180,7 @@

Method Details

"bundleId": "A String", # Immutable. The canonical bundle ID of the iOS app as it would appear in the iOS AppStore. "displayName": "A String", # The user-assigned display name for the `IosApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the IosApp, in the format: projects/PROJECT_IDENTIFIER /iosApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.iosApps#IosApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `IosApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -234,7 +234,7 @@

Method Details

"bundleId": "A String", # Immutable. The canonical bundle ID of the iOS app as it would appear in the iOS AppStore. "displayName": "A String", # The user-assigned display name for the `IosApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the IosApp, in the format: projects/PROJECT_IDENTIFIER /iosApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.iosApps#IosApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `IosApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -275,7 +275,7 @@

Method Details

"bundleId": "A String", # Immutable. The canonical bundle ID of the iOS app as it would appear in the iOS AppStore. "displayName": "A String", # The user-assigned display name for the `IosApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the IosApp, in the format: projects/PROJECT_IDENTIFIER /iosApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.iosApps#IosApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `IosApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -298,7 +298,7 @@

Method Details

"bundleId": "A String", # Immutable. The canonical bundle ID of the iOS app as it would appear in the iOS AppStore. "displayName": "A String", # The user-assigned display name for the `IosApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the IosApp, in the format: projects/PROJECT_IDENTIFIER /iosApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.iosApps#IosApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `IosApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -318,7 +318,7 @@

{ "allowMissing": True or False, # If set to true, and the App is not found, the request will succeed but no action will be taken on the server. "etag": "A String", # Checksum provided in the IosApp resource. If provided, this checksum ensures that the client has an up-to-date value before proceeding. - "immediate": True or False, # Determines whether to _immediately_ delete the IosApp. If set to true, the App is immediately deleted from the Project and cannot be restored to the Project. If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteIosApp + "immediate": True or False, # Determines whether to _immediately_ delete the IosApp. If set to true, the App is immediately deleted from the Project and cannot be undeleted (that is, restored to the Project). If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteIosApp "validateOnly": True or False, # If set to true, the request is only validated. The App will _not_ be removed. } diff --git a/docs/dyn/firebase_v1beta1.projects.webApps.html b/docs/dyn/firebase_v1beta1.projects.webApps.html index a4c17e070d..d19172d917 100644 --- a/docs/dyn/firebase_v1beta1.projects.webApps.html +++ b/docs/dyn/firebase_v1beta1.projects.webApps.html @@ -124,7 +124,7 @@

], "displayName": "A String", # The user-assigned display name for the `WebApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the WebApp, in the format: projects/PROJECT_IDENTIFIER /webApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.webApps#WebApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `WebApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -182,7 +182,7 @@

], "displayName": "A String", # The user-assigned display name for the `WebApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the WebApp, in the format: projects/PROJECT_IDENTIFIER /webApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.webApps#WebApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `WebApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -247,7 +247,7 @@

], "displayName": "A String", # The user-assigned display name for the `WebApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the WebApp, in the format: projects/PROJECT_IDENTIFIER /webApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.webApps#WebApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `WebApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -289,7 +289,7 @@

], "displayName": "A String", # The user-assigned display name for the `WebApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the WebApp, in the format: projects/PROJECT_IDENTIFIER /webApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.webApps#WebApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `WebApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -313,7 +313,7 @@

], "displayName": "A String", # The user-assigned display name for the `WebApp`. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and it may be sent with update requests to ensure the client has an up-to-date value before proceeding. Learn more about `etag` in Google's [AIP-154 standard](https://google.aip.dev/154#declarative-friendly-resources). This etag is strongly validated. - "expireTime": "A String", # Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state. + "expireTime": "A String", # Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state. "name": "A String", # The resource name of the WebApp, in the format: projects/PROJECT_IDENTIFIER /webApps/APP_ID * PROJECT_IDENTIFIER: the parent Project's [`ProjectNumber`](../projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](../projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510). Note that the value for PROJECT_IDENTIFIER in any response body will be the `ProjectId`. * APP_ID: the globally unique, Firebase-assigned identifier for the App (see [`appId`](../projects.webApps#WebApp.FIELDS.app_id)). "projectId": "A String", # Output only. Immutable. A user-assigned unique identifier of the parent FirebaseProject for the `WebApp`. "state": "A String", # Output only. The lifecycle state of the App. @@ -333,7 +333,7 @@

{ "allowMissing": True or False, # If set to true, and the App is not found, the request will succeed but no action will be taken on the server. "etag": "A String", # Checksum provided in the WebApp resource. If provided, this checksum ensures that the client has an up-to-date value before proceeding. - "immediate": True or False, # Determines whether to _immediately_ delete the WebApp. If set to true, the App is immediately deleted from the Project and cannot be restored to the Project. If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteWebApp + "immediate": True or False, # Determines whether to _immediately_ delete the WebApp. If set to true, the App is immediately deleted from the Project and cannot be undeleted (that is, restored to the Project). If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteWebApp "validateOnly": True or False, # If set to true, the request is only validated. The App will _not_ be removed. } diff --git a/docs/dyn/iam_v1.locations.workforcePools.html b/docs/dyn/iam_v1.locations.workforcePools.html index bc53520fe4..e02d697b31 100644 --- a/docs/dyn/iam_v1.locations.workforcePools.html +++ b/docs/dyn/iam_v1.locations.workforcePools.html @@ -133,7 +133,7 @@

Creates a new WorkforcePool. You cannot reuse the name of a deleted pool until 30 days after deletion.
 
 Args:
-  location: string, The location of the pool to create. Format: `locations/{location}`. (required)
+  location: string, Optional. The location of the pool to create. Format: `locations/{location}`. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -146,17 +146,17 @@ 

], "disableProgrammaticSignin": True or False, # Optional. Disable programmatic sign-in by disabling token issue via the Security Token API endpoint. See [Security Token Service API] (https://cloud.google.com/iam/docs/reference/sts/rest). }, - "description": "A String", # A user-specified description of the pool. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. - "displayName": "A String", # A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the pool. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. + "displayName": "A String", # Optional. A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workforce pool will be permanently purged and cannot be recovered. "name": "A String", # Output only. The resource name of the pool. Format: `locations/{location}/workforcePools/{workforce_pool_id}` "parent": "A String", # Immutable. The resource name of the parent. Format: `organizations/{org-id}`. - "sessionDuration": "A String", # Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. + "sessionDuration": "A String", # Optional. Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. "state": "A String", # Output only. The state of the pool. } - workforcePoolId: string, The ID to use for the pool, which becomes the final component of the resource name. The IDs must be a globally unique string of 6 to 63 lowercase letters, digits, or hyphens. It must start with a letter, and cannot have a trailing hyphen. The prefix `gcp-` is reserved for use by Google, and may not be specified. + workforcePoolId: string, Optional. The ID to use for the pool, which becomes the final component of the resource name. The IDs must be a globally unique string of 6 to 63 lowercase letters, digits, or hyphens. It must start with a letter, and cannot have a trailing hyphen. The prefix `gcp-` is reserved for use by Google, and may not be specified. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -244,13 +244,13 @@

], "disableProgrammaticSignin": True or False, # Optional. Disable programmatic sign-in by disabling token issue via the Security Token API endpoint. See [Security Token Service API] (https://cloud.google.com/iam/docs/reference/sts/rest). }, - "description": "A String", # A user-specified description of the pool. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. - "displayName": "A String", # A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the pool. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. + "displayName": "A String", # Optional. A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workforce pool will be permanently purged and cannot be recovered. "name": "A String", # Output only. The resource name of the pool. Format: `locations/{location}/workforcePools/{workforce_pool_id}` "parent": "A String", # Immutable. The resource name of the parent. Format: `organizations/{org-id}`. - "sessionDuration": "A String", # Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. + "sessionDuration": "A String", # Optional. Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. "state": "A String", # Output only. The state of the pool. }
@@ -341,13 +341,13 @@

], "disableProgrammaticSignin": True or False, # Optional. Disable programmatic sign-in by disabling token issue via the Security Token API endpoint. See [Security Token Service API] (https://cloud.google.com/iam/docs/reference/sts/rest). }, - "description": "A String", # A user-specified description of the pool. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. - "displayName": "A String", # A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the pool. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. + "displayName": "A String", # Optional. A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workforce pool will be permanently purged and cannot be recovered. "name": "A String", # Output only. The resource name of the pool. Format: `locations/{location}/workforcePools/{workforce_pool_id}` "parent": "A String", # Immutable. The resource name of the parent. Format: `organizations/{org-id}`. - "sessionDuration": "A String", # Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. + "sessionDuration": "A String", # Optional. Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. "state": "A String", # Output only. The state of the pool. }, ], @@ -386,13 +386,13 @@

], "disableProgrammaticSignin": True or False, # Optional. Disable programmatic sign-in by disabling token issue via the Security Token API endpoint. See [Security Token Service API] (https://cloud.google.com/iam/docs/reference/sts/rest). }, - "description": "A String", # A user-specified description of the pool. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. - "displayName": "A String", # A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the pool. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again. + "displayName": "A String", # Optional. A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workforce pool will be permanently purged and cannot be recovered. "name": "A String", # Output only. The resource name of the pool. Format: `locations/{location}/workforcePools/{workforce_pool_id}` "parent": "A String", # Immutable. The resource name of the parent. Format: `organizations/{org-id}`. - "sessionDuration": "A String", # Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. + "sessionDuration": "A String", # Optional. Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion. "state": "A String", # Output only. The state of the pool. } diff --git a/docs/dyn/iam_v1.locations.workforcePools.providers.html b/docs/dyn/iam_v1.locations.workforcePools.providers.html index 9b3959564d..2bf1ac10df 100644 --- a/docs/dyn/iam_v1.locations.workforcePools.providers.html +++ b/docs/dyn/iam_v1.locations.workforcePools.providers.html @@ -124,20 +124,20 @@

The object takes the form of: { # A configuration for an external identity provider. - "attributeCondition": "A String", # A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` + "attributeCondition": "A String", # Optional. A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` "attributeMapping": { # Required. Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.posix_username`: The Linux username used by OS Login. 
This is an optional field and the mapped POSIX username cannot exceed 32 characters, The key must match the regex "^a-zA-Z0-9._{0,31}$". This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {"google.subject": "assertion.sub"} ``` "a_key": "A String", }, - "description": "A String", # A user-specified description of the provider. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. - "displayName": "A String", # A user-specified display name for the provider. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the provider. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. + "displayName": "A String", # Optional. A user-specified display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload pool provider will be permanently purged and cannot be recovered. "extraAttributesOauth2Client": { # Represents the OAuth 2.0 client credential configuration for retrieving additional user attributes that are not present in the initial authentication credentials from the identity provider, e.g. groups. See https://datatracker.ietf.org/doc/html/rfc6749#section-4.4 for more details on client credentials grant flow. # Optional. The configuration for OAuth 2.0 client used to get the additional user attributes. This should be used when users can't get the desired claims in authentication credentials. Currently this configuration is only supported with OIDC protocol. "attributesType": "A String", # Required. Represents the IdP and type of claims that should be fetched. "clientId": "A String", # Required. 
The OAuth 2.0 client ID for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Required. The OAuth 2.0 client secret for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, @@ -149,16 +149,16 @@

"name": "A String", # Output only. The resource name of the provider. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}` "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider configuration. "clientId": "A String", # Required. The client ID. Must match the audience claim of the JWT issued by the identity provider. - "clientSecret": { # Representation of a client secret configured for the OIDC provider. # The optional client secret. Required to enable Authorization Code flow for web sign-in. + "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Optional. The optional client secret. Required to enable Authorization Code flow for web sign-in. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, "issuerUri": "A String", # Required. The OIDC issuer URI. Must be a valid URI using the `https` scheme. - "jwksJson": "A String", # OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } + "jwksJson": "A String", # Optional. OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } "webSsoConfig": { # Configuration for web single sign-on for the OIDC provider. # Required. Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser. - "additionalScopes": [ # Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. + "additionalScopes": [ # Optional. Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. "A String", ], "assertionClaimsBehavior": "A String", # Required. 
The behavior for how OIDC Claims are included in the `assertion` object used for attribute mapping and attribute condition. @@ -251,20 +251,20 @@
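The provider request body above combines a required `attributeMapping` (which must include `google.subject` for OIDC), the `oidc` issuer settings, and a `webSsoConfig`. A sketch of creating one such provider; the issuer, client ID, pool name and enum choices are illustrative assumptions, and `responseType` is a `webSsoConfig` field not quoted in this excerpt:

from googleapiclient.discovery import build

iam = build("iam", "v1")

provider = {
    "displayName": "Example OIDC IdP",
    # OIDC providers must map google.subject; assertion.sub is the usual source claim.
    "attributeMapping": {"google.subject": "assertion.sub"},
    "oidc": {
        "issuerUri": "https://idp.example.com",  # placeholder issuer
        "clientId": "example-client-id",         # placeholder client ID
        "webSsoConfig": {
            # Illustrative choices; an ID token flow needs no client secret, which the
            # docs above mark as required only for the Authorization Code flow.
            "responseType": "ID_TOKEN",
            "assertionClaimsBehavior": "ONLY_ID_TOKEN_CLAIMS",
        },
    },
}

operation = iam.locations().workforcePools().providers().create(
    parent="locations/global/workforcePools/contractor-pool",  # placeholder pool
    workforcePoolProviderId="example-oidc",
    body=provider,
).execute()
print(operation.get("name"))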

An object of the form: { # A configuration for an external identity provider. - "attributeCondition": "A String", # A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` + "attributeCondition": "A String", # Optional. A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` "attributeMapping": { # Required. Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.posix_username`: The Linux username used by OS Login. 
This is an optional field and the mapped POSIX username cannot exceed 32 characters, The key must match the regex "^a-zA-Z0-9._{0,31}$". This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {"google.subject": "assertion.sub"} ``` "a_key": "A String", }, - "description": "A String", # A user-specified description of the provider. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. - "displayName": "A String", # A user-specified display name for the provider. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the provider. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. + "displayName": "A String", # Optional. A user-specified display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload pool provider will be permanently purged and cannot be recovered. "extraAttributesOauth2Client": { # Represents the OAuth 2.0 client credential configuration for retrieving additional user attributes that are not present in the initial authentication credentials from the identity provider, e.g. groups. See https://datatracker.ietf.org/doc/html/rfc6749#section-4.4 for more details on client credentials grant flow. # Optional. The configuration for OAuth 2.0 client used to get the additional user attributes. This should be used when users can't get the desired claims in authentication credentials. Currently this configuration is only supported with OIDC protocol. "attributesType": "A String", # Required. Represents the IdP and type of claims that should be fetched. "clientId": "A String", # Required. 
The OAuth 2.0 client ID for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Required. The OAuth 2.0 client secret for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, @@ -276,16 +276,16 @@

"name": "A String", # Output only. The resource name of the provider. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}` "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider configuration. "clientId": "A String", # Required. The client ID. Must match the audience claim of the JWT issued by the identity provider. - "clientSecret": { # Representation of a client secret configured for the OIDC provider. # The optional client secret. Required to enable Authorization Code flow for web sign-in. + "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Optional. The optional client secret. Required to enable Authorization Code flow for web sign-in. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, "issuerUri": "A String", # Required. The OIDC issuer URI. Must be a valid URI using the `https` scheme. - "jwksJson": "A String", # OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } + "jwksJson": "A String", # Optional. OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } "webSsoConfig": { # Configuration for web single sign-on for the OIDC provider. # Required. Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser. - "additionalScopes": [ # Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. + "additionalScopes": [ # Optional. Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. "A String", ], "assertionClaimsBehavior": "A String", # Required. 
The behavior for how OIDC Claims are included in the `assertion` object used for attribute mapping and attribute condition. @@ -320,20 +320,20 @@

"nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. "workforcePoolProviders": [ # A list of providers. { # A configuration for an external identity provider. - "attributeCondition": "A String", # A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` + "attributeCondition": "A String", # Optional. A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` "attributeMapping": { # Required. Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. 
If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.posix_username`: The Linux username used by OS Login. This is an optional field and the mapped POSIX username cannot exceed 32 characters, The key must match the regex "^a-zA-Z0-9._{0,31}$". This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {"google.subject": "assertion.sub"} ``` "a_key": "A String", }, - "description": "A String", # A user-specified description of the provider. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. - "displayName": "A String", # A user-specified display name for the provider. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the provider. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. + "displayName": "A String", # Optional. A user-specified display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload pool provider will be permanently purged and cannot be recovered. "extraAttributesOauth2Client": { # Represents the OAuth 2.0 client credential configuration for retrieving additional user attributes that are not present in the initial authentication credentials from the identity provider, e.g. groups. See https://datatracker.ietf.org/doc/html/rfc6749#section-4.4 for more details on client credentials grant flow. # Optional. The configuration for OAuth 2.0 client used to get the additional user attributes. This should be used when users can't get the desired claims in authentication credentials. Currently this configuration is only supported with OIDC protocol. 
"attributesType": "A String", # Required. Represents the IdP and type of claims that should be fetched. "clientId": "A String", # Required. The OAuth 2.0 client ID for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Required. The OAuth 2.0 client secret for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, @@ -345,16 +345,16 @@

"name": "A String", # Output only. The resource name of the provider. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}` "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider configuration. "clientId": "A String", # Required. The client ID. Must match the audience claim of the JWT issued by the identity provider. - "clientSecret": { # Representation of a client secret configured for the OIDC provider. # The optional client secret. Required to enable Authorization Code flow for web sign-in. + "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Optional. The optional client secret. Required to enable Authorization Code flow for web sign-in. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, "issuerUri": "A String", # Required. The OIDC issuer URI. Must be a valid URI using the `https` scheme. - "jwksJson": "A String", # OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } + "jwksJson": "A String", # Optional. OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } "webSsoConfig": { # Configuration for web single sign-on for the OIDC provider. # Required. Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser. - "additionalScopes": [ # Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. + "additionalScopes": [ # Optional. Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. "A String", ], "assertionClaimsBehavior": "A String", # Required. 
The behavior for how OIDC Claims are included in the `assertion` object used for attribute mapping and attribute condition. @@ -394,20 +394,20 @@
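For orientation, a minimal Python sketch of supplying the OIDC fields documented above through the client library; it assumes the `iam` v1 discovery service, Application Default Credentials, and placeholder pool, provider, secret, and issuer values, and the web SSO enum values shown are illustrative only.

```
from googleapiclient import discovery

# Builds the IAM client using Application Default Credentials.
iam = discovery.build("iam", "v1")

provider_body = {
    "displayName": "Example OIDC provider",  # optional, at most 32 characters
    "attributeMapping": {"google.subject": "assertion.sub"},
    "oidc": {
        "clientId": "example-client-id",
        "issuerUri": "https://idp.example.com",
        # Input only; the plain text value is never returned in responses.
        "clientSecret": {"value": {"plainText": "example-secret"}},
        "webSsoConfig": {
            # Illustrative enum values; check the API reference for the full set.
            "responseType": "CODE",
            "assertionClaimsBehavior": "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS",
            "additionalScopes": ["groups"],  # optional, at most 10 scopes
        },
    },
}

operation = iam.locations().workforcePools().providers().create(
    parent="locations/global/workforcePools/example-pool",  # placeholder pool
    workforcePoolProviderId="example-provider",
    body=provider_body,
).execute()  # returns a long-running Operation
```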
The object takes the form of: { # A configuration for an external identity provider. - "attributeCondition": "A String", # A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` + "attributeCondition": "A String", # Optional. A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ``` "attributeMapping": { # Required. Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.posix_username`: The Linux username used by OS Login. 
This is an optional field and the mapped POSIX username cannot exceed 32 characters, The key must match the regex "^a-zA-Z0-9._{0,31}$". This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {"google.subject": "assertion.sub"} ``` "a_key": "A String", }, - "description": "A String", # A user-specified description of the provider. Cannot exceed 256 characters. - "disabled": True or False, # Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. - "displayName": "A String", # A user-specified display name for the provider. Cannot exceed 32 characters. + "description": "A String", # Optional. A user-specified description of the provider. Cannot exceed 256 characters. + "disabled": True or False, # Optional. Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. + "displayName": "A String", # Optional. A user-specified display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload pool provider will be permanently purged and cannot be recovered. "extraAttributesOauth2Client": { # Represents the OAuth 2.0 client credential configuration for retrieving additional user attributes that are not present in the initial authentication credentials from the identity provider, e.g. groups. See https://datatracker.ietf.org/doc/html/rfc6749#section-4.4 for more details on client credentials grant flow. # Optional. The configuration for OAuth 2.0 client used to get the additional user attributes. This should be used when users can't get the desired claims in authentication credentials. Currently this configuration is only supported with OIDC protocol. "attributesType": "A String", # Required. Represents the IdP and type of claims that should be fetched. "clientId": "A String", # Required. 
The OAuth 2.0 client ID for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Required. The OAuth 2.0 client secret for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, @@ -419,16 +419,16 @@
"name": "A String", # Output only. The resource name of the provider. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}` "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider configuration. "clientId": "A String", # Required. The client ID. Must match the audience claim of the JWT issued by the identity provider. - "clientSecret": { # Representation of a client secret configured for the OIDC provider. # The optional client secret. Required to enable Authorization Code flow for web sign-in. + "clientSecret": { # Representation of a client secret configured for the OIDC provider. # Optional. The optional client secret. Required to enable Authorization Code flow for web sign-in. "value": { # Representation of the value of the client secret. # The value of the client secret. - "plainText": "A String", # Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. + "plainText": "A String", # Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response. "thumbprint": "A String", # Output only. A thumbprint to represent the current client secret value. }, }, "issuerUri": "A String", # Required. The OIDC issuer URI. Must be a valid URI using the `https` scheme. - "jwksJson": "A String", # OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } + "jwksJson": "A String", # Optional. OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { "keys": [ { "kty": "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": "", "y": "", "crv": "" } ] } "webSsoConfig": { # Configuration for web single sign-on for the OIDC provider. # Required. Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser. - "additionalScopes": [ # Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. + "additionalScopes": [ # Optional. Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. "A String", ], "assertionClaimsBehavior": "A String", # Required. 
The behavior for how OIDC Claims are included in the `assertion` object used for attribute mapping and attribute condition. diff --git a/docs/dyn/merchantapi_accounts_v1beta.accounts.shippingSettings.html b/docs/dyn/merchantapi_accounts_v1beta.accounts.shippingSettings.html index c0a16b39ce..da13109b94 100644 --- a/docs/dyn/merchantapi_accounts_v1beta.accounts.shippingSettings.html +++ b/docs/dyn/merchantapi_accounts_v1beta.accounts.shippingSettings.html @@ -153,7 +153,7 @@
}, "warehouseBasedDeliveryTimes": [ # Optional. Indicates that the delivery time should be calculated per warehouse (shipping origin location) based on the settings of the selected carrier. When set, no other transit time related field in delivery time should be set. { # Indicates that the delivery time should be calculated per warehouse (shipping origin location) based on the settings of the selected carrier. When set, no other transit time related field in `delivery_time` should be set. - "carrier": "A String", # Required. Carrier, such as `"UPS"` or `"Fedex"`. + "carrier": "A String", # Required. Carrier, such as `"UPS"` or `"Fedex"`. [supported carriers](https://support.google.com/merchants/answer/7050921#zippy=%2Ccarrier-rates-au-de-uk-and-us-only) "carrierService": "A String", # Required. Carrier service, such as `"ground"` or `"2 days"`. The name of the service must be in the eddSupportedServices list. "warehouse": "A String", # Required. Warehouse name. This should match warehouse }, @@ -192,7 +192,7 @@
"A String", ], "carrierRates": [ # Optional. A list of carrier rates that can be referred to by `main_table` or `single_value`. - { # A list of carrier rates that can be referred to by `main_table` or `single_value`. + { # A list of carrier rates that can be referred to by `main_table` or `single_value`. Supported carrier services are defined in https://support.google.com/merchants/answer/12577710?hl=en&ref_topic=12570808&sjid=10662598224319463032-NC#zippy=%2Cdelivery-cost-rate-type%2Ccarrier-rate-au-de-uk-and-us-only. "carrier": "A String", # Required. Carrier service, such as `"UPS"` or `"Fedex"`. "carrierService": "A String", # Required. Carrier service, such as `"ground"` or `"2 days"`. "flatAdjustment": { # The price represented as a number and currency. # Optional. Additive shipping rate modifier. Can be negative. For example `{ "amount_micros": 1, "currency_code" : "USD" }` adds $1 to the rate, `{ "amount_micros": -3, "currency_code" : "USD" }` removes $3 from the rate. @@ -471,7 +471,7 @@
}, "warehouseBasedDeliveryTimes": [ # Optional. Indicates that the delivery time should be calculated per warehouse (shipping origin location) based on the settings of the selected carrier. When set, no other transit time related field in delivery time should be set. { # Indicates that the delivery time should be calculated per warehouse (shipping origin location) based on the settings of the selected carrier. When set, no other transit time related field in `delivery_time` should be set. - "carrier": "A String", # Required. Carrier, such as `"UPS"` or `"Fedex"`. + "carrier": "A String", # Required. Carrier, such as `"UPS"` or `"Fedex"`. [supported carriers](https://support.google.com/merchants/answer/7050921#zippy=%2Ccarrier-rates-au-de-uk-and-us-only) "carrierService": "A String", # Required. Carrier service, such as `"ground"` or `"2 days"`. The name of the service must be in the eddSupportedServices list. "warehouse": "A String", # Required. Warehouse name. This should match warehouse }, @@ -510,7 +510,7 @@
"A String", ], "carrierRates": [ # Optional. A list of carrier rates that can be referred to by `main_table` or `single_value`. - { # A list of carrier rates that can be referred to by `main_table` or `single_value`. + { # A list of carrier rates that can be referred to by `main_table` or `single_value`. Supported carrier services are defined in https://support.google.com/merchants/answer/12577710?hl=en&ref_topic=12570808&sjid=10662598224319463032-NC#zippy=%2Cdelivery-cost-rate-type%2Ccarrier-rate-au-de-uk-and-us-only. "carrier": "A String", # Required. Carrier service, such as `"UPS"` or `"Fedex"`. "carrierService": "A String", # Required. Carrier service, such as `"ground"` or `"2 days"`. "flatAdjustment": { # The price represented as a number and currency. # Optional. Additive shipping rate modifier. Can be negative. For example `{ "amount_micros": 1, "currency_code" : "USD" }` adds $1 to the rate, `{ "amount_micros": -3, "currency_code" : "USD" }` removes $3 from the rate. @@ -787,7 +787,7 @@
}, "warehouseBasedDeliveryTimes": [ # Optional. Indicates that the delivery time should be calculated per warehouse (shipping origin location) based on the settings of the selected carrier. When set, no other transit time related field in delivery time should be set. { # Indicates that the delivery time should be calculated per warehouse (shipping origin location) based on the settings of the selected carrier. When set, no other transit time related field in `delivery_time` should be set. - "carrier": "A String", # Required. Carrier, such as `"UPS"` or `"Fedex"`. + "carrier": "A String", # Required. Carrier, such as `"UPS"` or `"Fedex"`. [supported carriers](https://support.google.com/merchants/answer/7050921#zippy=%2Ccarrier-rates-au-de-uk-and-us-only) "carrierService": "A String", # Required. Carrier service, such as `"ground"` or `"2 days"`. The name of the service must be in the eddSupportedServices list. "warehouse": "A String", # Required. Warehouse name. This should match warehouse }, @@ -826,7 +826,7 @@
"A String", ], "carrierRates": [ # Optional. A list of carrier rates that can be referred to by `main_table` or `single_value`. - { # A list of carrier rates that can be referred to by `main_table` or `single_value`. + { # A list of carrier rates that can be referred to by `main_table` or `single_value`. Supported carrier services are defined in https://support.google.com/merchants/answer/12577710?hl=en&ref_topic=12570808&sjid=10662598224319463032-NC#zippy=%2Cdelivery-cost-rate-type%2Ccarrier-rate-au-de-uk-and-us-only. "carrier": "A String", # Required. Carrier service, such as `"UPS"` or `"Fedex"`. "carrierService": "A String", # Required. Carrier service, such as `"ground"` or `"2 days"`. "flatAdjustment": { # The price represented as a number and currency. # Optional. Additive shipping rate modifier. Can be negative. For example `{ "amount_micros": 1, "currency_code" : "USD" }` adds $1 to the rate, `{ "amount_micros": -3, "currency_code" : "USD" }` removes $3 from the rate. diff --git a/docs/dyn/osconfig_v2beta.folders.html b/docs/dyn/osconfig_v2beta.folders.html new file mode 100644 index 0000000000..fe346023a6 --- /dev/null +++ b/docs/dyn/osconfig_v2beta.folders.html @@ -0,0 +1,91 @@ + + + +

OS Config API . folders

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
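A minimal sketch of reaching this collection from Python, assuming the `osconfig` v2beta discovery document is available and using Application Default Credentials; the nested resource calls mirror the page hierarchy described above and on the following pages.

```
from googleapiclient import discovery

# Builds the OS Config v2beta client.
osconfig = discovery.build("osconfig", "v2beta")

# Resource objects mirror the page hierarchy: folders -> locations -> global_.
folders = osconfig.folders()
policy_orchestrators = folders.locations().global_().policyOrchestrators()

osconfig.close()  # close httplib2 connections when finished
```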
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.folders.locations.global_.html b/docs/dyn/osconfig_v2beta.folders.locations.global_.html new file mode 100644 index 0000000000..4fc9f9ddd5 --- /dev/null +++ b/docs/dyn/osconfig_v2beta.folders.locations.global_.html @@ -0,0 +1,91 @@ + + + +

OS Config API . folders . locations . global_

+

Instance Methods

+

+ policyOrchestrators() +

+

Returns the policyOrchestrators Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.folders.locations.global_.policyOrchestrators.html b/docs/dyn/osconfig_v2beta.folders.locations.global_.policyOrchestrators.html new file mode 100644 index 0000000000..b61abe99db --- /dev/null +++ b/docs/dyn/osconfig_v2beta.folders.locations.global_.policyOrchestrators.html @@ -0,0 +1,1468 @@ + + + +

OS Config API . folders . locations . global_ . policyOrchestrators

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, policyOrchestratorId=None, requestId=None, x__xgafv=None)

+

Creates a new policy orchestrator under the given folder resource. `name` field of the given orchestrator are ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. Orchestrator state field might be only set to `ACTIVE`, `STOPPED` or omitted (in which case, the created resource will be in `ACTIVE` state anyway).

+

+ delete(name, etag=None, requestId=None, x__xgafv=None)

+

Deletes an existing policy orchestrator resource, parented by a folder.

+

+ get(name, x__xgafv=None)

+

Retrieves an existing policy orchestrator, parented by a folder.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the policy orchestrators under the given parent folder resource.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates an existing policy orchestrator, parented by a folder.

+
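To illustrate the `patch` entry listed above, a hedged sketch of stopping an orchestrator by updating only its `state` field; the resource name is a placeholder and the update mask follows the usual comma-separated field-path convention.

```
from googleapiclient import discovery

osconfig = discovery.build("osconfig", "v2beta")

operation = osconfig.folders().locations().global_().policyOrchestrators().patch(
    name="folders/123456/locations/global/policyOrchestrators/example-orchestrator",
    updateMask="state",            # only the state field is changed
    body={"state": "STOPPED"},     # the orchestrator stops making changes
).execute()
```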

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
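A short sketch of paging through orchestrators with `list` and `list_next`, assuming a placeholder folder ID and Application Default Credentials; the `policyOrchestrators` response key is an assumption based on the list schema.

```
from googleapiclient import discovery

osconfig = discovery.build("osconfig", "v2beta")
orchestrators = osconfig.folders().locations().global_().policyOrchestrators()

request = orchestrators.list(
    parent="folders/123456/locations/global",  # placeholder folder
    pageSize=50,
)
while request is not None:
    response = request.execute()
    for orchestrator in response.get("policyOrchestrators", []):
        print(orchestrator["name"], orchestrator.get("state"))
    request = orchestrators.list_next(
        previous_request=request, previous_response=response
    )
```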
+ create(parent, body=None, policyOrchestratorId=None, requestId=None, x__xgafv=None) +
Creates a new policy orchestrator under the given folder resource. `name` field of the given orchestrator are ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. Orchestrator state field might be only set to `ACTIVE`, `STOPPED` or omitted (in which case, the created resource will be in `ACTIVE` state anyway).
+
+Args:
+  parent: string, Required. The parent resource name in the form of: `organizations/{organization_id}/locations/global` `folders/{folder_id}/locations/global` 'projects/{project_id_or_number}/locations/global' (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # PolicyOrchestrator helps managing project+zone level policy resources (e.g. OS Policy Assignments), by providing tools to create, update and delete them across projects and locations, at scale. Policy orchestrator functions as an endless loop. Each iteration orchestrator computes a set of resources that should be affected, then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), the future loop iterations will address that. Orchestrator can either upsert or delete policy resources. For more details, see the description of the `action`, and `orchestrated_resource` fields. Note that policy orchestrator do not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If orchestrator gets deleted, it does not affect the resources it created in the past. Those will remain where they were. Same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating the set of affected resources. For the UPSERT action, the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set, it must follow these restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For the DELETE action, the ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID. A new revision is committed whenever a rollout is triggered for an OS policy assignment.
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
+  policyOrchestratorId: string, Required. The logical identifier of the policy orchestrator, with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the parent.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
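For orientation, here is a minimal, hypothetical sketch of calling the create() method documented above through the google-api-python-client discovery interface. The resource path (folders().locations().global_().policyOrchestrators()), the folder number, and the sample OS policy body are illustrative assumptions, not taken from this patch:

    import uuid
    from googleapiclient.discovery import build

    # Build the OS Config v2beta client (uses application default credentials).
    service = build("osconfig", "v2beta")

    # Hypothetical orchestrator body: upsert a trivial OS policy assignment
    # that installs one APT package on every VM in the targeted projects.
    body = {
        "action": "UPSERT",
        "orchestratedResource": {
            "id": "example-os-policy-assignment",  # hypothetical resource id
            "osPolicyAssignmentV1Payload": {
                "instanceFilter": {"all": True},
                "osPolicies": [{
                    "id": "install-package",
                    "mode": "ENFORCEMENT",
                    "resourceGroups": [{
                        "resources": [{
                            "id": "install-pkg",
                            "pkg": {"desiredState": "INSTALLED",
                                    "apt": {"name": "cowsay"}},
                        }],
                    }],
                }],
                "rollout": {
                    "disruptionBudget": {"percent": 10},
                    "minWaitDuration": "300s",
                },
            },
        },
    }

    # create() returns a long-running operation, as documented above.
    operation = (
        service.folders().locations().global_().policyOrchestrators()
        .create(
            parent="folders/123456789/locations/global",  # hypothetical folder
            policyOrchestratorId="example-orchestrator",
            body=body,
            requestId=str(uuid.uuid4()),  # makes retries of this call idempotent
        )
        .execute()
    )
    print(operation["name"])  # operation name to poll for completion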
+
+ delete(name, etag=None, requestId=None, x__xgafv=None)
+Deletes an existing policy orchestrator resource, parented by a folder.
+
+Args:
+  name: string, Required. Name of the resource to be deleted. (required)
+  etag: string, Optional. The current etag of the policy orchestrator. If an etag is provided and does not match the current etag of the policy orchestrator, deletion will be blocked and an ABORTED error will be returned.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
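A short, hypothetical sketch of an etag-guarded delete() under the same resource-path assumptions as the create sketch above; the folder number and orchestrator name are placeholders:

    import uuid
    from googleapiclient.discovery import build

    service = build("osconfig", "v2beta")
    name = ("folders/123456789/locations/global/"
            "policyOrchestrators/example-orchestrator")  # hypothetical name

    orchestrators = service.folders().locations().global_().policyOrchestrators()

    # Read the current etag first so the delete is rejected (ABORTED) if the
    # orchestrator changed in the meantime, as described in the Args above.
    current = orchestrators.get(name=name).execute()
    operation = orchestrators.delete(
        name=name,
        etag=current.get("etag"),
        requestId=str(uuid.uuid4()),  # safe to reuse on retry of this request
    ).execute()
    print(operation["name"])  # long-running operation to poll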
+
+ get(name, x__xgafv=None)
+Retrieves an existing policy orchestrator, parented by a folder.
+
+Args:
+  name: string, Required. The resource name. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop. In each iteration, the orchestrator computes the set of resources that should be affected, then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only a minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A a file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
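A hypothetical sketch of reading an orchestrator back with get() and inspecting its orchestration state, again assuming the same resource path as the earlier sketches; the name is a placeholder and the field names follow the schema documented above:

    from googleapiclient.discovery import build

    service = build("osconfig", "v2beta")
    name = ("folders/123456789/locations/global/"
            "policyOrchestrators/example-orchestrator")  # hypothetical name

    orchestrator = (
        service.folders().locations().global_().policyOrchestrators()
        .get(name=name).execute()
    )

    # "state" is the user-settable ACTIVE/STOPPED switch; "orchestrationState"
    # carries the output-only progress of the current and previous iterations.
    print(orchestrator.get("state"))
    current = (orchestrator.get("orchestrationState", {})
               .get("currentIterationState", {}))
    print(current.get("state"), current.get("progress"))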
+
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists the policy orchestrators under the given parent folder resource.
+
+Args:
+  parent: string, Required. The parent resource name. (required)
+  filter: string, Optional. Filtering results
+  orderBy: string, Optional. Hint for how to order the results
+  pageSize: integer, Optional. Requested page size. Server may return fewer items than requested. If unspecified, server will pick an appropriate default.
+  pageToken: string, Optional. A token identifying a page of results the server should return.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for the list policy orchestrator resources.
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "policyOrchestrators": [ # The policy orchestrators for the specified parent resource.
+    { # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop. In each iteration, the orchestrator computes the set of resources that should be affected, then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only a minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those remain where they were. The same applies if projects are removed from the orchestrator's scope.
+      "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+      "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+      "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+      "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+      "labels": { # Optional. Labels as key value pairs
+        "a_key": "A String",
+      },
+      "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+      "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+        "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+        "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+          "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+          "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+          "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+          "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+          "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+            "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+            "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+              { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+                "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+              { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+                "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+              { # VM inventory details.
+                "osShortName": "A String", # Required. The OS short name
+                "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+              },
+            ],
+          },
+          "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+          "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+            { # An OS policy defines the desired state configuration for a VM.
+              "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+              "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+              "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+              "mode": "A String", # Required. Policy mode
+              "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+                { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+                  "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                    { # Filtering criteria to select VMs based on inventory details.
+                      "osShortName": "A String", # Required. The OS short name
+                      "osVersion": "A String", # The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions.
+                    },
+                  ],
+                  "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                    { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                      "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce`, and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to the `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource is now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` over `1`) to have an explicit indicator of `in desired state`, `not in desired state`, and errors. This is because, for example, PowerShell always returns an exit code of `0` unless an `exit` statement is provided in the script. So, for consistency and explicitness, exit codes `100` and `101` were chosen. A minimal sketch of this convention appears after the return value below. # Exec resource
+                        "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                          "args": [ # Optional arguments to pass to the source during execution.
+                            "A String",
+                          ],
+                          "file": { # A remote or local file. # A remote or local file.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                          "interpreter": "A String", # Required. The script interpreter to use.
+                          "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                          "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                        },
+                        "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and an exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                          "args": [ # Optional arguments to pass to the source during execution.
+                            "A String",
+                          ],
+                          "file": { # A remote or local file. # A remote or local file.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                          "interpreter": "A String", # Required. The script interpreter to use.
+                          "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                          "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                        },
+                      },
+                      "file": { # A resource that manages the state of a file. # File resource
+                        "content": "A String", # A file with this content. The size of the content is limited to 32KiB.
+                        "file": { # A remote or local file. # A remote or local source.
+                          "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                          "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                            "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                            "generation": "A String", # Generation number of the Cloud Storage object.
+                            "object": "A String", # Required. Name of the Cloud Storage object.
+                          },
+                          "localPath": "A String", # A local path within the VM to use.
+                          "remote": { # Specifies a file available via some URI. # A generic remote file.
+                            "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                            "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                          },
+                        },
+                        "path": "A String", # Required. The absolute path of the file within the VM.
+                        "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similar to the numeric mode used in the Linux chmod utility). Each digit represents a three-bit number: the 4 bit corresponds to read permission, the 2 bit to write permission, and the 1 bit to execute permission. The default is 755. Some examples of permissions and their values: read, write, and execute: 7; read and execute: 5; read and write: 6; read only: 4.
+                        "state": "A String", # Required. Desired state of the file.
+                      },
+                      "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                      "pkg": { # A resource that manages a system package. # Package resource
+                        "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                          "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                          "source": { # A remote or local file. # Required. A deb package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                        "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                          "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                            "A String",
+                          ],
+                          "source": { # A remote or local file. # Required. The MSI package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                          "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                          "source": { # A remote or local file. # Required. An rpm package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                          "name": "A String", # Required. Package name.
+                        },
+                      },
+                      "repository": { # A resource that manages a package repository. # Package repository resource
+                        "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                          "archiveType": "A String", # Required. Type of archive files in this repository.
+                          "components": [ # Required. List of components for this repository. Must contain at least one item.
+                            "A String",
+                          ],
+                          "distribution": "A String", # Required. Distribution of this repository.
+                          "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                          "uri": "A String", # Required. URI for this repository.
+                        },
+                        "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                          "name": "A String", # Required. The name of the repository.
+                          "url": "A String", # Required. The url of the repository.
+                        },
+                        "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                          "baseUrl": "A String", # Required. The location of the repository directory.
+                          "displayName": "A String", # The display name of the repository.
+                          "gpgKeys": [ # URIs of GPG keys.
+                            "A String",
+                          ],
+                          "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                        },
+                        "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                          "baseUrl": "A String", # Required. The location of the repository directory.
+                          "displayName": "A String", # The display name of the repository.
+                          "gpgKeys": [ # URIs of GPG keys.
+                            "A String",
+                          ],
+                          "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                        },
+                      },
+                    },
+                  ],
+                },
+              ],
+            },
+          ],
+          "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+          "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+          "revisionId": "A String", # Output only. The assignment revision ID. A new revision is committed whenever a rollout is triggered for an OS policy assignment.
+          "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+            "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+              "fixed": 42, # Specifies a fixed value.
+              "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+            },
+            "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+          },
+          "rolloutState": "A String", # Output only. OS policy assignment rollout state
+          "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+        },
+      },
+      "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines the scope for the orchestration, in the context of the enclosing PolicyOrchestrator resource. The scope is expanded into a list of (project, zone) pairs in which the rollout action will take place. Expansion starts with the Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - The list of projects is cross-joined with a list of all available zones. - The resulting list of pairs is filtered according to the selectors.
+        "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+          { # Selector for the resources in scope of orchestration.
+            "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+              "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+                "A String",
+              ],
+            },
+            "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+              "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+                "A String",
+              ],
+              "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+                "A String",
+              ],
+            },
+          },
+        ],
+      },
+      "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+        "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+          "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+            "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+              {
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+            ],
+            "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+          },
+          "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+          "finishTime": "A String", # Output only. Finish time of the wave iteration.
+          "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+          "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+          "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+          "startTime": "A String", # Output only. Start time of the wave iteration.
+          "state": "A String", # Output only. State of the iteration.
+        },
+        "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+          "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+            "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+              {
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+            ],
+            "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+          },
+          "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+          "finishTime": "A String", # Output only. Finish time of the wave iteration.
+          "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+          "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+          "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+          "startTime": "A String", # Output only. Start time of the wave iteration.
+          "state": "A String", # Output only. State of the iteration.
+        },
+      },
+      "reconciling": True or False, # Output only. Set to true if there are ongoing changes being applied by the orchestrator.
+      "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+      "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+    },
+  ],
+  "unreachable": [ # Locations that could not be reached.
+    "A String",
+  ],
+}
+
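+A minimal, hypothetical sketch of the `validate`/`enforce` exit-code convention described for the `ExecResource` above. The ids, paths, scripts, and the `SHELL` interpreter value are illustrative assumptions, not taken from this page; only the `100`/`101` exit-code convention comes from the schema description:
+
+    # Hypothetical exec resource for an OS policy. validate exits 100 when the VM is
+    # already in the desired state and 101 when it is not; enforce exits 100 on success.
+    exec_resource = {
+        "id": "ensure-marker-file",  # made-up resource id
+        "exec": {
+            "validate": {
+                "interpreter": "SHELL",  # assumed interpreter value
+                "script": "if [ -f /etc/example-marker ]; then exit 100; else exit 101; fi",
+            },
+            "enforce": {
+                "interpreter": "SHELL",
+                "script": "touch /etc/example-marker && exit 100",
+            },
+        },
+    }
+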
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
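+A hedged usage sketch of the list()/list_next() pagination pattern. The resource chain, the parent value, and the `policyOrchestrators` response key are assumptions for illustration, not taken from this page:
+
+    from googleapiclient.discovery import build
+
+    # Assumed resource chain for the folder-parented policyOrchestrators collection.
+    service = build("osconfig", "v2beta")
+    orchestrators = service.folders().locations().global_().policyOrchestrators()
+
+    request = orchestrators.list(parent="folders/123/locations/global")  # made-up parent
+    while request is not None:
+        response = request.execute()
+        for orchestrator in response.get("policyOrchestrators", []):  # assumed items key
+            print(orchestrator["name"])
+        # Build the request for the next page; list_next() returns None when exhausted.
+        request = orchestrators.list_next(previous_request=request, previous_response=response)
+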
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Updates an existing policy orchestrator, parented by a folder.
+
+Args:
+  name: string, Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # PolicyOrchestrator helps manage project- and zone-level policy resources (e.g. OS policy assignments) by providing tools to create, update, and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop: in each iteration the orchestrator computes the set of resources that should be affected, then progressively applies changes to them. If this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates; every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, the resources it created in the past are not affected and remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating the set of affected resources. For the UPSERT action, the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set, it must follow these restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For the DELETE action, the ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated, or deleted. The name field is ignored and replaced with a generated value. With this field set, the orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of the orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce`, and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to the `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource is now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` over `1`) to have an explicit indicator of `in desired state`, `not in desired state`, and errors. This is because, for example, PowerShell always returns an exit code of `0` unless an `exit` statement is provided in the script. So, for consistency and explicitness, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and an exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similar to the numeric mode used in the Linux chmod utility). Each digit represents a three-bit number: the 4 bit corresponds to read permission, the 2 bit to write permission, and the 1 bit to execute permission. The default is 755. Some examples of permissions and their values: read, write, and execute: 7; read and execute: 5; read and write: 6; read only: 4.
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID. A new revision is committed whenever a rollout is triggered for an OS policy assignment.
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines the scope for the orchestration, in the context of the enclosing PolicyOrchestrator resource. The scope is expanded into a list of pairs in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - The list of projects is cross-joined with a list of all available zones. - The resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true if there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
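As a usage sketch (editorial, not part of the generated reference): the patch() documented above returns the long-running Operation shown in its Returns section. A minimal sketch, assuming this patch() belongs to the folder-level policyOrchestrators collection covered by this file, that Application Default Credentials are available to build(), and that the folder ID and orchestrator name below are hypothetical placeholders:

# Illustrative sketch only: stop a folder-level policy orchestrator via patch().
from googleapiclient.discovery import build

client = build("osconfig", "v2beta")

orchestrator_name = (  # hypothetical resource name
    "folders/123/locations/global/policyOrchestrators/example-orchestrator"
)

operation = (
    client.folders()
    .locations()
    .global_()
    .policyOrchestrators()
    .patch(name=orchestrator_name, body={"state": "STOPPED"}, updateMask="state")
    .execute()
)

# The returned operation name can be polled with the operations collection
# documented later in this patch.
print(operation["name"], "done:", operation.get("done", False))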
+
+
\ No newline at end of file
diff --git a/docs/dyn/osconfig_v2beta.folders.locations.html b/docs/dyn/osconfig_v2beta.folders.locations.html
new file mode 100644
index 0000000000..c605332aba
--- /dev/null
+++ b/docs/dyn/osconfig_v2beta.folders.locations.html
@@ -0,0 +1,96 @@
+
+
+
+

OS Config API . folders . locations

+

Instance Methods

+

+ global_() +

+

Returns the global_ Resource.

+ +

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/osconfig_v2beta.folders.locations.operations.html b/docs/dyn/osconfig_v2beta.folders.locations.operations.html
new file mode 100644
index 0000000000..097c0b7cfa
--- /dev/null
+++ b/docs/dyn/osconfig_v2beta.folders.locations.operations.html
@@ -0,0 +1,235 @@
+
+
+
+

OS Config API . folders . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
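As a hedged usage sketch (editorial, not generated documentation): cancel() above takes the operation resource name plus an empty CancelOperationRequest body. The operation name below is a hypothetical placeholder.

# Illustrative sketch: request cancellation of a long-running OS Config operation.
from googleapiclient.discovery import build

client = build("osconfig", "v2beta")
op_name = "folders/123/locations/global/operations/operation-12345"  # hypothetical
client.folders().locations().operations().cancel(name=op_name, body={}).execute()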
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
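Similarly, a minimal sketch of delete() for an operation whose result is no longer needed; the operation name is again a placeholder.

# Illustrative sketch: drop interest in an operation's result without cancelling it.
from googleapiclient.discovery import build

client = build("osconfig", "v2beta")
op_name = "folders/123/locations/global/operations/operation-12345"  # hypothetical
client.folders().locations().operations().delete(name=op_name).execute()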
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
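A small polling sketch built on get(), assuming the Operation shape documented above; the operation name and the 10-second interval are placeholders, not recommendations from the service.

# Illustrative sketch: poll get() until the long-running operation completes.
import time

from googleapiclient.discovery import build

client = build("osconfig", "v2beta")
op_name = "folders/123/locations/global/operations/operation-12345"  # hypothetical

operation = client.folders().locations().operations().get(name=op_name).execute()
while not operation.get("done"):
    time.sleep(10)
    operation = client.folders().locations().operations().get(name=op_name).execute()

# Per the Operation schema above, either `response` or `error` is populated when done.
print(operation.get("response") or operation.get("error"))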
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
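A paging sketch combining list() and list_next() as documented above; the parent resource name and page size are hypothetical placeholders.

# Illustrative sketch: iterate over all operations under a folder location, page by page.
from googleapiclient.discovery import build

client = build("osconfig", "v2beta")
ops = client.folders().locations().operations()

request = ops.list(name="folders/123/locations/global", pageSize=50)  # hypothetical parent
while request is not None:
    response = request.execute()
    for op in response.get("operations", []):
        print(op["name"], "done" if op.get("done") else "running")
    # list_next() returns None once there are no more pages.
    request = ops.list_next(previous_request=request, previous_response=response)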
+
+
\ No newline at end of file
diff --git a/docs/dyn/osconfig_v2beta.html b/docs/dyn/osconfig_v2beta.html
new file mode 100644
index 0000000000..826208d10c
--- /dev/null
+++ b/docs/dyn/osconfig_v2beta.html
@@ -0,0 +1,121 @@
+
+
+
+

OS Config API

+

Instance Methods

+

+ folders() +

+

Returns the folders Resource.

+ +

+ organizations() +

+

Returns the organizations Resource.

+ +

+ projects() +

+

Returns the projects Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ new_batch_http_request()

+

Create a BatchHttpRequest object based on the discovery document.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ new_batch_http_request() +
Create a BatchHttpRequest object based on the discovery document.
+
+                Args:
+                  callback: callable, A callback to be called for each response, of the
+                    form callback(id, response, exception). The first parameter is the
+                    request id, and the second is the deserialized response object. The
+                    third is an apiclient.errors.HttpError exception object if an HTTP
+                    error occurred while processing the request, or None if no error
+                    occurred.
+
+                Returns:
+                  A BatchHttpRequest object based on the discovery document.
+                
+
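A batching sketch using new_batch_http_request() with the callback signature described above. The operation names are hypothetical placeholders, and it is assumed here that the service accepts batched requests built from the discovery document.

# Illustrative sketch: fetch several operations in a single batch HTTP request.
from googleapiclient.discovery import build

client = build("osconfig", "v2beta")
ops = client.folders().locations().operations()

def on_response(request_id, response, exception):
    # Called once per batched request; `exception` is an HttpError on failure.
    if exception is not None:
        print(request_id, "failed:", exception)
    else:
        print(request_id, "done:", response.get("done"))

batch = client.new_batch_http_request(callback=on_response)
batch.add(ops.get(name="folders/123/locations/global/operations/op-1"))  # hypothetical
batch.add(ops.get(name="folders/123/locations/global/operations/op-2"))  # hypothetical
batch.execute()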
+
+
\ No newline at end of file
diff --git a/docs/dyn/osconfig_v2beta.organizations.html b/docs/dyn/osconfig_v2beta.organizations.html
new file mode 100644
index 0000000000..af2cc5d98f
--- /dev/null
+++ b/docs/dyn/osconfig_v2beta.organizations.html
@@ -0,0 +1,91 @@
+
+
+
+

OS Config API . organizations

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/osconfig_v2beta.organizations.locations.global_.html b/docs/dyn/osconfig_v2beta.organizations.locations.global_.html
new file mode 100644
index 0000000000..105cc78ed5
--- /dev/null
+++ b/docs/dyn/osconfig_v2beta.organizations.locations.global_.html
@@ -0,0 +1,91 @@
+
+
+
+

OS Config API . organizations . locations . global_

+

Instance Methods

+

+ policyOrchestrators() +

+

Returns the policyOrchestrators Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/osconfig_v2beta.organizations.locations.global_.policyOrchestrators.html b/docs/dyn/osconfig_v2beta.organizations.locations.global_.policyOrchestrators.html
new file mode 100644
index 0000000000..c8e57689fa
--- /dev/null
+++ b/docs/dyn/osconfig_v2beta.organizations.locations.global_.policyOrchestrators.html
@@ -0,0 +1,1468 @@
+
+
+
+

OS Config API . organizations . locations . global_ . policyOrchestrators

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, policyOrchestratorId=None, requestId=None, x__xgafv=None)

+

Creates a new policy orchestrator under the given organizations resource. The `name` field of the given orchestrator is ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. The orchestrator's state field may only be set to `ACTIVE`, `STOPPED`, or omitted (in which case the created resource will be in `ACTIVE` state anyway).

+

+ delete(name, etag=None, requestId=None, x__xgafv=None)

+

Deletes an existing policy orchestrator resource, parented by an organization.

+

+ get(name, x__xgafv=None)

+

Retrieves an existing policy orchestrator, parented by an organization.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the policy orchestrators under the given parent organization resource.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates an existing policy orchestrator, parented by an organization.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, policyOrchestratorId=None, requestId=None, x__xgafv=None) +
Creates a new policy orchestrator under the given organizations resource. The `name` field of the given orchestrator is ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. The orchestrator's state field may only be set to `ACTIVE`, `STOPPED`, or omitted (in which case the created resource will be in `ACTIVE` state anyway).
+
+Args:
+  parent: string, Required. The parent resource name in the form of: `organizations/{organization_id}/locations/global` `folders/{folder_id}/locations/global` `projects/{project_id_or_number}/locations/global` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. A policy orchestrator functions as an endless loop: in each iteration the orchestrator computes the set of resources that should be affected and then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator gets deleted, it does not affect the resources it created in the past; those will remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating the set of affected resources. For the UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set, it must conform to the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For the DELETE action, the ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. The name field is ignored and replaced with a generated value. With this field set, the orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of the orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
+  policyOrchestratorId: string, Required. The logical identifier of the policy orchestrator, with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the parent.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check whether the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
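+For orientation, here is a minimal sketch of calling this create method through the discovery-based Python client. The `organizations().locations().global_().policyOrchestrators()` collection path, the example parent, and the payload values are illustrative assumptions; only the field names come from the reference above.
+
+  from googleapiclient.discovery import build
+
+  # Assumed service name/version; adjust to your environment and credentials.
+  service = build("osconfig", "v2beta")
+
+  body = {
+      "action": "UPSERT",
+      "orchestratedResource": {
+          "osPolicyAssignmentV1Payload": {
+              "instanceFilter": {"all": True},
+              "osPolicies": [{
+                  "id": "install-example-package",
+                  "mode": "ENFORCEMENT",
+                  "resourceGroups": [{
+                      "resources": [{
+                          "id": "install-pkg",
+                          "pkg": {"desiredState": "INSTALLED", "apt": {"name": "cowsay"}},
+                      }],
+                  }],
+              }],
+              "rollout": {
+                  "disruptionBudget": {"percent": 10},
+                  "minWaitDuration": "300s",
+              },
+          },
+      },
+  }
+
+  # Hypothetical organization number and orchestrator id.
+  operation = (
+      service.organizations()
+      .locations()
+      .global_()
+      .policyOrchestrators()
+      .create(
+          parent="organizations/123/locations/global",
+          policyOrchestratorId="example-orchestrator",
+          body=body,
+      )
+      .execute()
+  )
+  print(operation["name"])  # long-running operation; poll until "done" is True
+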
+
+ delete(name, etag=None, requestId=None, x__xgafv=None)
+Deletes an existing policy orchestrator resource, parented by an organization.
+
+Args:
+  name: string, Required. Name of the resource to be deleted. (required)
+  etag: string, Optional. The current etag of the policy orchestrator. If an etag is provided and does not match the current etag of the policy orchestrator, deletion will be blocked and an ABORTED error will be returned.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check whether the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
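+A hedged sketch of an etag-guarded delete: fetch the orchestrator first, pass its current etag so a concurrent update aborts the delete instead of racing it, then inspect the returned long-running operation. The collection path and resource name are assumptions for illustration.
+
+  from googleapiclient.discovery import build
+
+  service = build("osconfig", "v2beta")  # assumed service name/version
+  orchestrators = service.organizations().locations().global_().policyOrchestrators()
+
+  # Hypothetical resource name following the documented format.
+  name = "organizations/123/locations/global/policyOrchestrators/example-orchestrator"
+
+  current = orchestrators.get(name=name).execute()
+  operation = orchestrators.delete(name=name, etag=current["etag"]).execute()
+
+  if operation.get("done") and "error" in operation:
+      # "error" is only meaningful once "done" is True; otherwise keep polling the operation.
+      raise RuntimeError(operation["error"].get("message", "delete failed"))
+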
+
+ get(name, x__xgafv=None)
+Retrieves an existing policy orchestrator, parented by an organization.
+
+Args:
+  name: string, Required. The resource name. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop. In each iteration the orchestrator computes a set of resources that should be affected, then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those will remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with the following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos'. If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A a file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
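+A short sketch of reading orchestration progress from a get() response. The collection path and resource name are assumptions for illustration; the field names follow the structure documented above.
+
+  from googleapiclient.discovery import build
+
+  service = build("osconfig", "v2beta")  # assumed service name/version
+
+  orchestrator = (
+      service.organizations()
+      .locations()
+      .global_()
+      .policyOrchestrators()
+      .get(name="organizations/123/locations/global/policyOrchestrators/example-orchestrator")
+      .execute()
+  )
+
+  # Summarise the current wave iteration reported by the orchestrator.
+  iteration = orchestrator.get("orchestrationState", {}).get("currentIterationState", {})
+  print("orchestrator state:", orchestrator.get("state"))   # ACTIVE or STOPPED
+  print("iteration state:", iteration.get("state"))
+  print("progress: %.0f%%" % iteration.get("progress", 0.0))
+  print("failed actions:", iteration.get("failedActions", "0"))
+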
+
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists the policy orchestrators under the given parent organization resource.
+
+Args:
+  parent: string, Required. The parent resource name. (required)
+  filter: string, Optional. Filtering results
+  orderBy: string, Optional. Hint for how to order the results
+  pageSize: integer, Optional. Requested page size. Server may return fewer items than requested. If unspecified, server will pick an appropriate default.
+  pageToken: string, Optional. A token identifying a page of results the server should return.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for the list policy orchestrator resources.
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "policyOrchestrators": [ # The policy orchestrators for the specified parent resource.
+    { # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop. In each iteration the orchestrator computes a set of resources that should be affected, then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those will remain where they were. The same applies if projects are removed from the orchestrator's scope.
+      "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+      "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+      "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+      "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+      "labels": { # Optional. Labels as key value pairs
+        "a_key": "A String",
+      },
+      "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+      "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+        "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+        "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+          "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+          "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+          "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+          "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+          "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+            "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+            "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+              { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+                "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+              { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+                "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+              { # VM inventory details.
+                "osShortName": "A String", # Required. The OS short name
+                "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+              },
+            ],
+          },
+          "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+          "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+            { # An OS policy defines the desired state configuration for a VM.
+              "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+              "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+              "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+              "mode": "A String", # Required. Policy mode
+              "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+                { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+                  "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                    { # Filtering criteria to select VMs based on inventory details.
+                      "osShortName": "A String", # Required. The OS short name
+                      "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                    },
+                  ],
+                  "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                    { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                      "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                        "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                          "args": [ # Optional arguments to pass to the source during execution.
+                            "A String",
+                          ],
+                          "file": { # A remote or local file. # A remote or local file.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                          "interpreter": "A String", # Required. The script interpreter to use.
+                          "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                          "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                        },
+                        "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                          "args": [ # Optional arguments to pass to the source during execution.
+                            "A String",
+                          ],
+                          "file": { # A remote or local file. # A remote or local file.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                          "interpreter": "A String", # Required. The script interpreter to use.
+                          "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                          "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                        },
+                      },
+                      "file": { # A resource that manages the state of a file. # File resource
+                        "content": "A String", # A a file with this content. The size of the content is limited to 32KiB.
+                        "file": { # A remote or local file. # A remote or local source.
+                          "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                          "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                            "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                            "generation": "A String", # Generation number of the Cloud Storage object.
+                            "object": "A String", # Required. Name of the Cloud Storage object.
+                          },
+                          "localPath": "A String", # A local path within the VM to use.
+                          "remote": { # Specifies a file available via some URI. # A generic remote file.
+                            "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                            "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                          },
+                        },
+                        "path": "A String", # Required. The absolute path of the file within the VM.
+                        "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                        "state": "A String", # Required. Desired state of the file.
+                      },
+                      "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                      "pkg": { # A resource that manages a system package. # Package resource
+                        "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                          "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                          "source": { # A remote or local file. # Required. A deb package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                        "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                          "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                            "A String",
+                          ],
+                          "source": { # A remote or local file. # Required. The MSI package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                          "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                          "source": { # A remote or local file. # Required. An rpm package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                          "name": "A String", # Required. Package name.
+                        },
+                      },
+                      "repository": { # A resource that manages a package repository. # Package repository resource
+                        "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                          "archiveType": "A String", # Required. Type of archive files in this repository.
+                          "components": [ # Required. List of components for this repository. Must contain at least one item.
+                            "A String",
+                          ],
+                          "distribution": "A String", # Required. Distribution of this repository.
+                          "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                          "uri": "A String", # Required. URI for this repository.
+                        },
+                        "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                          "name": "A String", # Required. The name of the repository.
+                          "url": "A String", # Required. The url of the repository.
+                        },
+                        "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                          "baseUrl": "A String", # Required. The location of the repository directory.
+                          "displayName": "A String", # The display name of the repository.
+                          "gpgKeys": [ # URIs of GPG keys.
+                            "A String",
+                          ],
+                          "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                        },
+                        "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                          "baseUrl": "A String", # Required. The location of the repository directory.
+                          "displayName": "A String", # The display name of the repository.
+                          "gpgKeys": [ # URIs of GPG keys.
+                            "A String",
+                          ],
+                          "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                        },
+                      },
+                    },
+                  ],
+                },
+              ],
+            },
+          ],
+          "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+          "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+          "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+          "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+            "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+              "fixed": 42, # Specifies a fixed value.
+              "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+            },
+            "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+          },
+          "rolloutState": "A String", # Output only. OS policy assignment rollout state
+          "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+        },
+      },
+      "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+        "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+          { # Selector for the resources in scope of orchestration.
+            "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+              "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+                "A String",
+              ],
+            },
+            "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+              "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+                "A String",
+              ],
+              "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+                "A String",
+              ],
+            },
+          },
+        ],
+      },
+      "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+        "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+          "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+            "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+              {
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+            ],
+            "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+          },
+          "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+          "finishTime": "A String", # Output only. Finish time of the wave iteration.
+          "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+          "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+          "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+          "startTime": "A String", # Output only. Start time of the wave iteration.
+          "state": "A String", # Output only. State of the iteration.
+        },
+        "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+          "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+            "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+              {
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+            ],
+            "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+          },
+          "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+          "finishTime": "A String", # Output only. Finish time of the wave iteration.
+          "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+          "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+          "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+          "startTime": "A String", # Output only. Start time of the wave iteration.
+          "state": "A String", # Output only. State of the iteration.
+        },
+      },
+      "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+      "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+      "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+    },
+  ],
+  "unreachable": [ # Locations that could not be reached.
+    "A String",
+  ],
+}
+
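+A minimal usage sketch (assuming application default credentials, and assuming the generated
+resource path is organizations().locations().global_().policyOrchestrators(), mirroring the
+`organizations/{organization_id}/locations/global/policyOrchestrators` name format above; the
+parent value below is a placeholder):
+
+  from googleapiclient import discovery
+
+  # Build the osconfig v2beta client; credentials are resolved from the environment.
+  service = discovery.build('osconfig', 'v2beta')
+
+  orchestrators = service.organizations().locations().global_().policyOrchestrators()
+  request = orchestrators.list(parent='organizations/123/locations/global', pageSize=50)
+  response = request.execute()
+  for po in response.get('policyOrchestrators', []):
+      print(po['name'], po.get('state'))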
+
+list_next()
+Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
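+A common pagination sketch, continuing the hypothetical example above, keeps requesting pages
+until list_next() returns None:
+
+  request = orchestrators.list(parent='organizations/123/locations/global')
+  while request is not None:
+      response = request.execute()
+      for po in response.get('policyOrchestrators', []):
+          print(po['name'])
+      # list_next() builds the request for the next page, or returns None when done.
+      request = orchestrators.list_next(previous_request=request, previous_response=response)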
+
+patch(name, body=None, updateMask=None, x__xgafv=None)
+Updates an existing policy orchestrator, parented by an organization.
+
+Args:
+  name: string, Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update, and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop: in each iteration the orchestrator computes a set of resources that should be affected, then progressively applies changes to them. If this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator gets deleted, it does not affect the resources it created in the past; those remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A a file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID. A new revision is committed whenever a rollout is triggered for an OS policy assignment.
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines the scope for the orchestration, in the context of the enclosing PolicyOrchestrator resource. The scope is expanded into a list of (project, zone) pairs in which the rollout action will take place. Expansion starts with the Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - The list of projects is cross-joined with a list of all available zones. - The resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. All defined selectors are combined with a logical AND. When no explicit `ResourceHierarchySelector` is specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true if there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - the orchestrator is actively looking for actions to be taken. - `STOPPED` - the orchestrator won't make any changes. Note: More states might be added in the future. A string is used here instead of an enum to avoid the need to propagate new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
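+Illustrative sketch (not part of the generated reference): one way this patch method might be invoked through the discovery-based Python client. The parent collection, resource name, and updated field below are assumptions chosen for the example, not values taken from this page.
+
+  # Hedged sketch: stop an existing policy orchestrator by updating its
+  # `state` field. The organizations-level collection and the resource
+  # name are placeholders; folders()/projects() apply for other parents.
+  from googleapiclient.discovery import build
+
+  service = build('osconfig', 'v2beta')  # assumes application default credentials
+
+  name = ('organizations/123/locations/global/'
+          'policyOrchestrators/example-orchestrator')  # placeholder name
+
+  operation = (
+      service.organizations()
+      .locations()
+      .global_()
+      .policyOrchestrators()
+      .patch(name=name, body={'state': 'STOPPED'}, updateMask='state')
+      .execute()
+  )
+  print(operation['name'])  # long-running operation; poll it via operations().get()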
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.organizations.locations.html b/docs/dyn/osconfig_v2beta.organizations.locations.html new file mode 100644 index 0000000000..8845377166 --- /dev/null +++ b/docs/dyn/osconfig_v2beta.organizations.locations.html @@ -0,0 +1,96 @@ + + + +

OS Config API . organizations . locations

+

Instance Methods

+

+ global_() +

+

Returns the global_ Resource.

+ +

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.organizations.locations.operations.html b/docs/dyn/osconfig_v2beta.organizations.locations.operations.html new file mode 100644 index 0000000000..be0e40dd70 --- /dev/null +++ b/docs/dyn/osconfig_v2beta.organizations.locations.operations.html @@ -0,0 +1,235 @@ + + + +

OS Config API . organizations . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
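+Illustrative sketch (not part of the generated reference): a minimal, hedged example of requesting cancellation of a long-running operation; the operation name below is a placeholder.
+
+  # Hedged sketch: best-effort cancellation of an OS Config operation.
+  from googleapiclient.discovery import build
+
+  service = build('osconfig', 'v2beta')  # assumes application default credentials
+  op_name = 'organizations/123/locations/global/operations/example-op'  # placeholder
+
+  service.organizations().locations().operations().cancel(
+      name=op_name, body={}).execute()  # returns an empty message on success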
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
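+Illustrative sketch (not part of the generated reference): polling an operation with this get method until it completes. The operation name and poll interval are placeholders.
+
+  # Hedged sketch: poll an operation until done, then inspect error/response.
+  import time
+  from googleapiclient.discovery import build
+
+  service = build('osconfig', 'v2beta')  # assumes application default credentials
+  ops = service.organizations().locations().operations()
+  op_name = 'organizations/123/locations/global/operations/example-op'  # placeholder
+
+  op = ops.get(name=op_name).execute()
+  while not op.get('done', False):
+      time.sleep(10)  # back off between polls
+      op = ops.get(name=op_name).execute()
+
+  if 'error' in op:
+      print('failed:', op['error'].get('message'))
+  else:
+      print('response:', op.get('response'))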
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+
+
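+Illustrative sketch (not part of the generated reference): paging through all operations under a parent resource with list() and list_next(). The parent name is a placeholder.
+
+  # Hedged sketch: iterate over every page of operations.
+  from googleapiclient.discovery import build
+
+  service = build('osconfig', 'v2beta')  # assumes application default credentials
+  ops = service.organizations().locations().operations()
+
+  request = ops.list(name='organizations/123/locations/global', pageSize=50)
+  while request is not None:
+      response = request.execute()
+      for op in response.get('operations', []):
+          print(op['name'], op.get('done', False))
+      request = ops.list_next(previous_request=request, previous_response=response)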
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.projects.html b/docs/dyn/osconfig_v2beta.projects.html new file mode 100644 index 0000000000..de602b6e50 --- /dev/null +++ b/docs/dyn/osconfig_v2beta.projects.html @@ -0,0 +1,91 @@ + + + +

OS Config API . projects

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.projects.locations.global_.html b/docs/dyn/osconfig_v2beta.projects.locations.global_.html new file mode 100644 index 0000000000..d1eaa3cc8c --- /dev/null +++ b/docs/dyn/osconfig_v2beta.projects.locations.global_.html @@ -0,0 +1,91 @@ + + + +

OS Config API . projects . locations . global_

+

Instance Methods

+

+ policyOrchestrators() +

+

Returns the policyOrchestrators Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.projects.locations.global_.policyOrchestrators.html b/docs/dyn/osconfig_v2beta.projects.locations.global_.policyOrchestrators.html new file mode 100644 index 0000000000..f01387e15d --- /dev/null +++ b/docs/dyn/osconfig_v2beta.projects.locations.global_.policyOrchestrators.html @@ -0,0 +1,1468 @@ + + + +

OS Config API . projects . locations . global_ . policyOrchestrators

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, policyOrchestratorId=None, requestId=None, x__xgafv=None)

+

Creates a new policy orchestrator under the given project resource. The `name` field of the given orchestrator is ignored and instead replaced by a combination of `parent` and `policy_orchestrator_id`. The orchestrator `state` field may only be set to `ACTIVE`, `STOPPED`, or omitted (in which case the created resource will be in the `ACTIVE` state anyway).

+

+ delete(name, etag=None, requestId=None, x__xgafv=None)

+

Deletes an existing policy orchestrator resource, parented by a project.

+

+ get(name, x__xgafv=None)

+

Retrieves an existing policy orchestrator, parented by a project.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the policy orchestrators under the given parent project resource.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates an existing policy orchestrator, parented by a project.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, policyOrchestratorId=None, requestId=None, x__xgafv=None) +
Creates a new policy orchestrator under the given project resource. The `name` field of the given orchestrator is ignored and instead replaced by a combination of `parent` and `policy_orchestrator_id`. The orchestrator `state` field may only be set to `ACTIVE`, `STOPPED`, or omitted (in which case the created resource will be in the `ACTIVE` state anyway).
+
+Args:
+  parent: string, Required. The parent resource name in the form of: `organizations/{organization_id}/locations/global` `folders/{folder_id}/locations/global` `projects/{project_id_or_number}/locations/global` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update, and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop. In each iteration the orchestrator computes a set of resources that should be affected, then progressively applies changes to them. If this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, this does not affect the resources it created in the past; those will remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating the set of affected resources. For the UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set, it must follow these restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For the DELETE action, the ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated, or deleted. The name field is ignored and replaced with a generated value. With this field set, the orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of the orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce`, and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to the `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource is now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors, because, for example, PowerShell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and explicitness, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and an exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similar to the numeric mode used in the Linux chmod utility). Each digit is a three-bit number in which the 4 bit corresponds to read, the 2 bit to write, and the 1 bit to execute. The default is 755. Examples of permissions and their associated values: read, write, and execute: 7; read and execute: 5; read and write: 6; read only: 4.
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
+  policyOrchestratorId: string, Required. The logical identifier of the policy orchestrator, with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the parent.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
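+The request body above is exhaustive, but a typical create call only sets a handful of fields. The following is a minimal, illustrative sketch of creating a policy orchestrator with the discovery-based Python client and inspecting the returned long-running operation. It is not part of the generated reference: the project, parent path, orchestrator ID, and the `projects().locations().global_().policyOrchestrators()` resource chain are assumptions inferred from the name formats documented above.
+
+    # Illustrative sketch only; identifiers and paths below are placeholders.
+    from googleapiclient import discovery
+
+    service = discovery.build("osconfig", "v2beta")
+
+    body = {
+        "action": "UPSERT",
+        "orchestratedResource": {
+            "osPolicyAssignmentV1Payload": {
+                "instanceFilter": {"all": True},
+                "osPolicies": [],  # required in practice; elided here for brevity
+                "rollout": {
+                    "disruptionBudget": {"percent": 10},
+                    "minWaitDuration": "60s",
+                },
+            },
+        },
+    }
+
+    operation = (
+        service.projects()
+        .locations()
+        .global_()
+        .policyOrchestrators()
+        .create(
+            parent="projects/my-project/locations/global",
+            policyOrchestratorId="my-orchestrator",
+            body=body,
+        )
+        .execute()
+    )
+
+    # The call returns a long-running operation; poll it until "done" is True.
+    print(operation["name"])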
+
+ delete(name, etag=None, requestId=None, x__xgafv=None)
+Deletes an existing policy orchestrator resource, parented by a project.
+
+Args:
+  name: string, Required. Name of the resource to be deleted. (required)
+  etag: string, Optional. The current etag of the policy orchestrator. If an etag is provided and does not match the current etag of the policy orchestrator, deletion will be blocked and an ABORTED error will be returned.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
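+As with create, a short illustrative sketch of the delete call (together with the get call documented next) may help; it is not part of the generated reference, and the resource name below is a placeholder. Passing the current etag, as described above, causes the delete to fail with an ABORTED error if the orchestrator has changed since it was read.
+
+    # Illustrative sketch only; the resource name is a placeholder.
+    from googleapiclient import discovery
+
+    service = discovery.build("osconfig", "v2beta")
+    orchestrators = service.projects().locations().global_().policyOrchestrators()
+
+    name = "projects/my-project/locations/global/policyOrchestrators/my-orchestrator"
+    current = orchestrators.get(name=name).execute()
+
+    # Supplying the etag guards against deleting a concurrently modified orchestrator.
+    operation = orchestrators.delete(name=name, etag=current["etag"]).execute()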
+
+ get(name, x__xgafv=None)
+Retrieves an existing policy orchestrator, parented by a project.
+
+Args:
+  name: string, Required. The resource name. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS policy assignments) by providing tools to create, update, and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop: in each iteration, the orchestrator computes the set of resources that should be affected, then progressively applies changes to them. If this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources; for more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name.
+            "osVersion": "A String", # The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name.
+                  "osVersion": "A String", # The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce`, and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to the `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource is now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state`, and errors, because, for example, PowerShell always returns an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similar to the numeric mode used in the Linux chmod utility). Each digit represents a three-bit number: the 4 bit corresponds to the read permission, the 2 bit to the write permission, and the 1 bit to the execute permission. The default is 755. Examples of permissions and their values: read, write, and execute: 7; read and execute: 5; read and write: 6; read only: 4.
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
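+
+A minimal sketch (not part of the generated reference) of how a caller might inspect the PolicyOrchestrator dict returned above. The field names follow the schema documented here; the `orchestrator` variable and the helper itself are hypothetical and assume a response already obtained from one of this collection's methods:
+
+    # Hypothetical helper: summarize the rollout state of a PolicyOrchestrator response dict.
+    def summarize_orchestrator(orchestrator: dict) -> str:
+        state = orchestrator.get('state', 'UNKNOWN')           # e.g. ACTIVE or STOPPED
+        reconciling = orchestrator.get('reconciling', False)   # True while changes are being applied
+        iteration = orchestrator.get('orchestrationState', {}).get('currentIterationState', {})
+        progress = iteration.get('progress', 0)                # estimated percentage, 0-100
+        failed = iteration.get('failedActions', '0')           # returned as a string
+        return f"{state} (reconciling={reconciling}): {progress:.0f}% done, {failed} failed actions"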
+ +
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists the policy orchestrators under the given parent project resource.
+
+Args:
+  parent: string, Required. The parent resource name. (required)
+  filter: string, Optional. Filtering results
+  orderBy: string, Optional. Hint for how to order the results
+  pageSize: integer, Optional. Requested page size. Server may return fewer items than requested. If unspecified, server will pick an appropriate default.
+  pageToken: string, Optional. A token identifying a page of results the server should return.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for the list policy orchestrator resources.
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "policyOrchestrators": [ # The policy orchestrators for the specified parent resource.
+    { # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop: in each iteration the orchestrator computes the set of resources that should be affected, then progressively applies changes to them. If this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those remain where they were. The same applies if projects are removed from the orchestrator's scope.
+      "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+      "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+      "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+      "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+      "labels": { # Optional. Labels as key value pairs
+        "a_key": "A String",
+      },
+      "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+      "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+        "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+        "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+          "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+          "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+          "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+          "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+          "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+            "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+            "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+              { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+                "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+              { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+                "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+              { # VM inventory details.
+                "osShortName": "A String", # Required. The OS short name
+                "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+              },
+            ],
+          },
+          "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+          "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+            { # An OS policy defines the desired state configuration for a VM.
+              "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+              "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+              "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+              "mode": "A String", # Required. Policy mode
+              "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+                { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+                  "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                    { # Filtering criteria to select VMs based on inventory details.
+                      "osShortName": "A String", # Required. The OS short name
+                      "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                    },
+                  ],
+                  "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                    { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                      "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                        "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                          "args": [ # Optional arguments to pass to the source during execution.
+                            "A String",
+                          ],
+                          "file": { # A remote or local file. # A remote or local file.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                          "interpreter": "A String", # Required. The script interpreter to use.
+                          "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                          "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                        },
+                        "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                          "args": [ # Optional arguments to pass to the source during execution.
+                            "A String",
+                          ],
+                          "file": { # A remote or local file. # A remote or local file.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                          "interpreter": "A String", # Required. The script interpreter to use.
+                          "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                          "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                        },
+                      },
+                      "file": { # A resource that manages the state of a file. # File resource
+                        "content": "A String", # A a file with this content. The size of the content is limited to 32KiB.
+                        "file": { # A remote or local file. # A remote or local source.
+                          "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                          "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                            "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                            "generation": "A String", # Generation number of the Cloud Storage object.
+                            "object": "A String", # Required. Name of the Cloud Storage object.
+                          },
+                          "localPath": "A String", # A local path within the VM to use.
+                          "remote": { # Specifies a file available via some URI. # A generic remote file.
+                            "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                            "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                          },
+                        },
+                        "path": "A String", # Required. The absolute path of the file within the VM.
+                        "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                        "state": "A String", # Required. Desired state of the file.
+                      },
+                      "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                      "pkg": { # A resource that manages a system package. # Package resource
+                        "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                          "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                          "source": { # A remote or local file. # Required. A deb package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                        "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                          "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                            "A String",
+                          ],
+                          "source": { # A remote or local file. # Required. The MSI package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                          "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                          "source": { # A remote or local file. # Required. An rpm package.
+                            "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                            "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                              "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                              "generation": "A String", # Generation number of the Cloud Storage object.
+                              "object": "A String", # Required. Name of the Cloud Storage object.
+                            },
+                            "localPath": "A String", # A local path within the VM to use.
+                            "remote": { # Specifies a file available via some URI. # A generic remote file.
+                              "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                              "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                            },
+                          },
+                        },
+                        "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                          "name": "A String", # Required. Package name.
+                        },
+                        "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                          "name": "A String", # Required. Package name.
+                        },
+                      },
+                      "repository": { # A resource that manages a package repository. # Package repository resource
+                        "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                          "archiveType": "A String", # Required. Type of archive files in this repository.
+                          "components": [ # Required. List of components for this repository. Must contain at least one item.
+                            "A String",
+                          ],
+                          "distribution": "A String", # Required. Distribution of this repository.
+                          "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                          "uri": "A String", # Required. URI for this repository.
+                        },
+                        "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                          "name": "A String", # Required. The name of the repository.
+                          "url": "A String", # Required. The url of the repository.
+                        },
+                        "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                          "baseUrl": "A String", # Required. The location of the repository directory.
+                          "displayName": "A String", # The display name of the repository.
+                          "gpgKeys": [ # URIs of GPG keys.
+                            "A String",
+                          ],
+                          "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                        },
+                        "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                          "baseUrl": "A String", # Required. The location of the repository directory.
+                          "displayName": "A String", # The display name of the repository.
+                          "gpgKeys": [ # URIs of GPG keys.
+                            "A String",
+                          ],
+                          "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                        },
+                      },
+                    },
+                  ],
+                },
+              ],
+            },
+          ],
+          "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+          "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+          "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+          "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+            "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+              "fixed": 42, # Specifies a fixed value.
+              "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+            },
+            "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+          },
+          "rolloutState": "A String", # Output only. OS policy assignment rollout state
+          "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+        },
+      },
+      "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+        "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+          { # Selector for the resources in scope of orchestration.
+            "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+              "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+                "A String",
+              ],
+            },
+            "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+              "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+                "A String",
+              ],
+              "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+                "A String",
+              ],
+            },
+          },
+        ],
+      },
+      "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+        "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+          "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+            "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+              {
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+            ],
+            "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+          },
+          "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+          "finishTime": "A String", # Output only. Finish time of the wave iteration.
+          "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+          "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+          "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+          "startTime": "A String", # Output only. Start time of the wave iteration.
+          "state": "A String", # Output only. State of the iteration.
+        },
+        "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+          "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+            "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+              {
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+            ],
+            "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+          },
+          "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+          "finishTime": "A String", # Output only. Finish time of the wave iteration.
+          "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+          "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+          "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+          "startTime": "A String", # Output only. Start time of the wave iteration.
+          "state": "A String", # Output only. State of the iteration.
+        },
+      },
+      "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+      "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+      "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+    },
+  ],
+  "unreachable": [ # Locations that could not be reached.
+    "A String",
+  ],
+}
+
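+
+As an illustration only (the API name, version, resource path, and parent value below are assumptions, not taken from this reference), a call to this `list` method might look like the following; authentication setup via Application Default Credentials is omitted:
+
+    from googleapiclient.discovery import build
+
+    # Assumed: the OS Config v2beta client exposes this collection under
+    # projects().locations().global_().policyOrchestrators().
+    service = build('osconfig', 'v2beta')
+    collection = service.projects().locations().global_().policyOrchestrators()
+
+    # 'my-project' is a hypothetical project; the parent format follows the
+    # name format documented for this resource.
+    response = collection.list(
+        parent='projects/my-project/locations/global',
+        pageSize=50,
+    ).execute()
+    for orchestrator in response.get('policyOrchestrators', []):
+        print(orchestrator['name'], orchestrator.get('state'))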
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
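+
+A short sketch of the usual pagination loop built from `list` and `list_next`, assuming `collection` is the policyOrchestrators collection object from the client and the parent value is hypothetical:
+
+    # Page through all policy orchestrators under the parent.
+    request = collection.list(parent='projects/my-project/locations/global')
+    while request is not None:
+        response = request.execute()
+        for orchestrator in response.get('policyOrchestrators', []):
+            print(orchestrator['name'])
+        # list_next returns None once there are no more pages.
+        request = collection.list_next(previous_request=request, previous_response=response)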
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Updates an existing policy orchestrator, parented by a project.
+
+Args:
+  name: string, Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # PolicyOrchestrator helps manage project+zone level policy resources (e.g. OS Policy Assignments) by providing tools to create, update and delete them across projects and locations, at scale. The policy orchestrator functions as an endless loop: in each iteration the orchestrator computes the set of resources that should be affected, then progressively applies changes to them. If this set of resources changes over time (e.g. new projects are added), future loop iterations will address that. The orchestrator can either upsert or delete policy resources. For more details, see the description of the `action` and `orchestrated_resource` fields. Note that the policy orchestrator does not "manage" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If the orchestrator is deleted, it does not affect the resources it created in the past; those remain where they were. The same applies if projects are removed from the orchestrator's scope.
+  "action": "A String", # Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist
+  "createTime": "A String", # Output only. Timestamp when the policy orchestrator resource was created.
+  "description": "A String", # Optional. Freeform text describing the purpose of the resource.
+  "etag": "A String", # Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels as key value pairs
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`
+  "orchestratedResource": { # Represents a resource that is being orchestrated by the policy orchestrator. # Required. Resource to be orchestrated by the policy orchestrator.
+    "id": "A String", # Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.
+    "osPolicyAssignmentV1Payload": { # OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). # Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource.
+      "baseline": True or False, # Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.
+      "deleted": True or False, # Output only. Indicates that this revision deletes the OS policy assignment.
+      "description": "A String", # OS policy assignment description. Length of the description is limited to 1024 characters.
+      "etag": "A String", # The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.
+      "instanceFilter": { # Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them. # Required. Filter to select VMs.
+        "all": True or False, # Target all VMs in the project. If true, no other criteria is permitted.
+        "exclusionLabels": [ # List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inclusionLabels": [ # List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.
+          { # Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.
+            "labels": { # Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.
+              "a_key": "A String",
+            },
+          },
+        ],
+        "inventories": [ # List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.
+          { # VM inventory details.
+            "osShortName": "A String", # Required. The OS short name
+            "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+          },
+        ],
+      },
+      "name": "A String", # Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.
+      "osPolicies": [ # Required. List of OS policies to be applied to the VMs.
+        { # An OS policy defines the desired state configuration for a VM.
+          "allowNoResourceGroupMatch": True or False, # This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.
+          "description": "A String", # Policy description. Length of the description is limited to 1024 characters.
+          "id": "A String", # Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.
+          "mode": "A String", # Required. Policy mode
+          "resourceGroups": [ # Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`
+            { # Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating Systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.
+              "inventoryFilters": [ # List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.
+                { # Filtering criteria to select VMs based on inventory details.
+                  "osShortName": "A String", # Required. The OS short name
+                  "osVersion": "A String", # The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.
+                },
+              ],
+              "resources": [ # Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.
+                { # An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.
+                  "exec": { # A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource in now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, Powershell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen. # Exec resource
+                    "enforce": { # A file or script to execute. # What to run to bring this resource into the desired state. An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                    "validate": { # A file or script to execute. # Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.
+                      "args": [ # Optional arguments to pass to the source during execution.
+                        "A String",
+                      ],
+                      "file": { # A remote or local file. # A remote or local file.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                      "interpreter": "A String", # Required. The script interpreter to use.
+                      "outputFilePath": "A String", # Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.
+                      "script": "A String", # An inline script. The size of the script is limited to 32KiB.
+                    },
+                  },
+                  "file": { # A resource that manages the state of a file. # File resource
+                    "content": "A String", # A a file with this content. The size of the content is limited to 32KiB.
+                    "file": { # A remote or local file. # A remote or local source.
+                      "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                      "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                        "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                        "generation": "A String", # Generation number of the Cloud Storage object.
+                        "object": "A String", # Required. Name of the Cloud Storage object.
+                      },
+                      "localPath": "A String", # A local path within the VM to use.
+                      "remote": { # Specifies a file available via some URI. # A generic remote file.
+                        "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                        "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                      },
+                    },
+                    "path": "A String", # Required. The absolute path of the file within the VM.
+                    "permissions": "A String", # Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4
+                    "state": "A String", # Required. Desired state of the file.
+                  },
+                  "id": "A String", # Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.
+                  "pkg": { # A resource that manages a system package. # Package resource
+                    "apt": { # A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]` # A package managed by Apt.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "deb": { # A deb package file. dpkg packages only support INSTALLED state. # A deb package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`
+                      "source": { # A remote or local file. # Required. A deb package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "desiredState": "A String", # Required. The desired state the agent should maintain for this package.
+                    "googet": { # A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package` # A package managed by GooGet.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "msi": { # An MSI package. MSI packages only support INSTALLED state. # An MSI package.
+                      "properties": [ # Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.
+                        "A String",
+                      ],
+                      "source": { # A remote or local file. # Required. The MSI package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "rpm": { # An RPM package file. RPM packages only support INSTALLED state. # An rpm package file.
+                      "pullDeps": True or False, # Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`
+                      "source": { # A remote or local file. # Required. An rpm package.
+                        "allowInsecure": True or False, # Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.
+                        "gcs": { # Specifies a file available as a Cloud Storage Object. # A Cloud Storage object.
+                          "bucket": "A String", # Required. Bucket of the Cloud Storage object.
+                          "generation": "A String", # Generation number of the Cloud Storage object.
+                          "object": "A String", # Required. Name of the Cloud Storage object.
+                        },
+                        "localPath": "A String", # A local path within the VM to use.
+                        "remote": { # Specifies a file available via some URI. # A generic remote file.
+                          "sha256Checksum": "A String", # SHA256 checksum of the remote file.
+                          "uri": "A String", # Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
+                        },
+                      },
+                    },
+                    "yum": { # A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package` # A package managed by YUM.
+                      "name": "A String", # Required. Package name.
+                    },
+                    "zypper": { # A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package` # A package managed by Zypper.
+                      "name": "A String", # Required. Package name.
+                    },
+                  },
+                  "repository": { # A resource that manages a package repository. # Package repository resource
+                    "apt": { # Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`. # An Apt Repository.
+                      "archiveType": "A String", # Required. Type of archive files in this repository.
+                      "components": [ # Required. List of components for this repository. Must contain at least one item.
+                        "A String",
+                      ],
+                      "distribution": "A String", # Required. Distribution of this repository.
+                      "gpgKey": "A String", # URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.
+                      "uri": "A String", # Required. URI for this repository.
+                    },
+                    "goo": { # Represents a Goo package repository. These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`. # A Goo Repository.
+                      "name": "A String", # Required. The name of the repository.
+                      "url": "A String", # Required. The url of the repository.
+                    },
+                    "yum": { # Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`. # A Yum Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.
+                    },
+                    "zypper": { # Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`. # A Zypper Repository.
+                      "baseUrl": "A String", # Required. The location of the repository directory.
+                      "displayName": "A String", # The display name of the repository.
+                      "gpgKeys": [ # URIs of GPG keys.
+                        "A String",
+                      ],
+                      "id": "A String", # Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        },
+      ],
+      "reconciling": True or False, # Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING
+      "revisionCreateTime": "A String", # Output only. The timestamp that the revision was created.
+      "revisionId": "A String", # Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment
+      "rollout": { # Message to configure the rollout at the zonal level for the OS policy assignment. # Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.
+        "disruptionBudget": { # Message encapsulating a value that can be either absolute ("fixed") or relative ("percent") to a value. # Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.
+          "fixed": 42, # Specifies a fixed value.
+          "percent": 42, # Specifies the relative value defined as a percentage, which will be multiplied by a reference value.
+        },
+        "minWaitDuration": "A String", # Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.
+      },
+      "rolloutState": "A String", # Output only. OS policy assignment rollout state
+      "uid": "A String", # Output only. Server generated unique id for the OS policy assignment resource.
+    },
+  },
+  "orchestrationScope": { # Defines a set of selectors which drive which resources are in scope of policy orchestration. # Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. - Resulting list of pairs is filtered according to the selectors.
+    "selectors": [ # Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.
+      { # Selector for the resources in scope of orchestration.
+        "locationSelector": { # Selector containing locations in scope. # Selector for selecting locations.
+          "includedLocations": [ # Optional. Names of the locations in scope. Format: `us-central1-a`
+            "A String",
+          ],
+        },
+        "resourceHierarchySelector": { # Selector containing Cloud Resource Manager resource hierarchy nodes. # Selector for selecting resource hierarchy.
+          "includedFolders": [ # Optional. Names of the folders in scope. Format: `folders/{folder_id}`
+            "A String",
+          ],
+          "includedProjects": [ # Optional. Names of the projects in scope. Format: `projects/{project_number}`
+            "A String",
+          ],
+        },
+      },
+    ],
+  },
+  "orchestrationState": { # Describes the state of the orchestration process. # Output only. State of the orchestration.
+    "currentIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Current Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+    "previousIterationState": { # Describes the state of a single iteration of the orchestrator. # Output only. Previous Wave iteration state.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Error thrown in the wave iteration.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "failedActions": "A String", # Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.
+      "finishTime": "A String", # Output only. Finish time of the wave iteration.
+      "performedActions": "A String", # Output only. Overall number of actions done by the orchestrator so far.
+      "progress": 3.14, # Output only. An estimated percentage of the progress. Number between 0 and 100.
+      "rolloutResource": "A String", # Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.
+      "startTime": "A String", # Output only. Start time of the wave iteration.
+      "state": "A String", # Output only. State of the iteration.
+    },
+  },
+  "reconciling": True or False, # Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.
+  "state": "A String", # Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.
+  "updateTime": "A String", # Output only. Timestamp when the policy orchestrator resource was last modified.
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
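+
+Example (editor's sketch, not part of the generated reference): a minimal call to this
+method with the dynamic client. It assumes the policy orchestrator collection is exposed
+as `projects().locations().global_().policyOrchestrators()`; the project and orchestrator
+names below are placeholders.
+
+  from googleapiclient import discovery
+
+  service = discovery.build('osconfig', 'v2beta')
+
+  # Hypothetical resource name; substitute a real policy orchestrator.
+  name = 'projects/my-project/locations/global/policyOrchestrators/my-orchestrator'
+
+  # Stop the orchestrator by updating only the `state` field.
+  operation = service.projects().locations().global_().policyOrchestrators().patch(
+      name=name,
+      updateMask='state',
+      body={'state': 'STOPPED'},
+  ).execute()
+  print(operation['name'])  # long-running Operation; poll it via the operations resource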
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.projects.locations.html b/docs/dyn/osconfig_v2beta.projects.locations.html new file mode 100644 index 0000000000..d2bc4564b6 --- /dev/null +++ b/docs/dyn/osconfig_v2beta.projects.locations.html @@ -0,0 +1,96 @@ + + + +

OS Config API . projects . locations

+

Instance Methods

+

+ global_() +

+

Returns the global_ Resource.

+ +

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
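+
+Example (editor's sketch): building the v2beta client and releasing its httplib2
+connections. In recent versions of google-api-python-client the built service object can
+also be used as a context manager, which calls `close()` on exit.
+
+  from googleapiclient import discovery
+
+  with discovery.build('osconfig', 'v2beta') as service:
+      locations = service.projects().locations()
+      # ... issue requests through `locations` here ...
+  # Leaving the `with` block calls close() and releases the
+  # underlying httplib2 connections.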
+ + \ No newline at end of file diff --git a/docs/dyn/osconfig_v2beta.projects.locations.operations.html b/docs/dyn/osconfig_v2beta.projects.locations.operations.html new file mode 100644 index 0000000000..3ca4a0056f --- /dev/null +++ b/docs/dyn/osconfig_v2beta.projects.locations.operations.html @@ -0,0 +1,235 @@ + + + +

OS Config API . projects . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
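+
+Example (editor's sketch): requesting cancellation of an operation; the operation name is
+a placeholder, and cancellation is best-effort as described above.
+
+  from googleapiclient import discovery
+
+  service = discovery.build('osconfig', 'v2beta')
+  op_name = 'projects/my-project/locations/us-central1/operations/1234'  # placeholder
+
+  # Ask the server to cancel; poll the operation afterwards to see whether it succeeded.
+  service.projects().locations().operations().cancel(name=op_name, body={}).execute()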
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
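+
+Example (editor's sketch): polling an operation until it completes; the operation name is
+a placeholder and the poll interval is arbitrary.
+
+  import time
+
+  from googleapiclient import discovery
+
+  service = discovery.build('osconfig', 'v2beta')
+  ops = service.projects().locations().operations()
+  op_name = 'projects/my-project/locations/us-central1/operations/1234'  # placeholder
+
+  op = ops.get(name=op_name).execute()
+  while not op.get('done'):
+      time.sleep(10)  # arbitrary poll interval
+      op = ops.get(name=op_name).execute()
+
+  if 'error' in op:
+      raise RuntimeError(op['error'].get('message', 'operation failed'))
+  result = op.get('response', {})  # empty for Delete-style methods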
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
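+
+Example (editor's sketch): paging through operations with `list()` and `list_next()`; the
+parent resource name is a placeholder.
+
+  from googleapiclient import discovery
+
+  service = discovery.build('osconfig', 'v2beta')
+  ops = service.projects().locations().operations()
+
+  request = ops.list(name='projects/my-project/locations/us-central1', pageSize=100)
+  while request is not None:
+      response = request.execute()
+      for op in response.get('operations', []):
+          print(op['name'], op.get('done', False))
+      # list_next() returns None when there are no more pages.
+      request = ops.list_next(previous_request=request, previous_response=response)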
+ + \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json index b9a25ca536..cb4a3147ea 100644 --- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json @@ -423,7 +423,7 @@ } } }, -"revision": "20240701", +"revision": "20241028", "rootUrl": "https://alertcenter.googleapis.com/", "schemas": { "AbuseDetected": { @@ -856,7 +856,7 @@ "type": "object" }, "AppSettingsChanged": { -"description": "Alerts from AppSettingsChanged bucket Rules configured by Admin which contain the below rules. Calendar settings changed Drive settings changed Email settings changed Mobile settings changed", +"description": "* Alerts from AppSettingsChanged bucket Rules configured by Admin which contain the below rules. Calendar settings changed Drive settings changed Email settings changed Mobile settings changed", "id": "AppSettingsChanged", "properties": { "alertDetails": { @@ -1577,7 +1577,7 @@ "type": "object" }, "PrimaryAdminChangedEvent": { -"description": "Event occurred when primary admin changed in customer's account. The event are being received from insight forwarder", +"description": "* Event occurred when primary admin changed in customer's account. The event are being received from insight forwarder", "id": "PrimaryAdminChangedEvent", "properties": { "domain": { @@ -1888,7 +1888,7 @@ "type": "object" }, "SSOProfileCreatedEvent": { -"description": "Event occurred when SSO Profile created in customer's account. The event are being received from insight forwarder", +"description": "* Event occurred when SSO Profile created in customer's account. The event are being received from insight forwarder", "id": "SSOProfileCreatedEvent", "properties": { "inboundSsoProfileName": { @@ -1899,7 +1899,7 @@ "type": "object" }, "SSOProfileDeletedEvent": { -"description": "Event occurred when SSO Profile deleted in customer's account. The event are being received from insight forwarder", +"description": "* Event occurred when SSO Profile deleted in customer's account. The event are being received from insight forwarder", "id": "SSOProfileDeletedEvent", "properties": { "inboundSsoProfileName": { @@ -1910,7 +1910,7 @@ "type": "object" }, "SSOProfileUpdatedEvent": { -"description": "Event occurred when SSO Profile updated in customer's account. The event are being received from insight forwarder", +"description": "* Event occurred when SSO Profile updated in customer's account. The event are being received from insight forwarder", "id": "SSOProfileUpdatedEvent", "properties": { "inboundSsoProfileChanges": { @@ -2013,7 +2013,7 @@ "type": "object" }, "SuperAdminPasswordResetEvent": { -"description": "Event occurred when password was reset for super admin in customer's account. The event are being received from insight forwarder", +"description": "* Event occurred when password was reset for super admin in customer's account. The event are being received from insight forwarder", "id": "SuperAdminPasswordResetEvent", "properties": { "userEmail": { @@ -2193,7 +2193,7 @@ "type": "object" }, "UserChanges": { -"description": "Alerts from UserChanges bucket Rules for predefined rules which contain the below rules. 
Suspended user made active New user Added User suspended (by admin) User granted admin privileges User admin privileges revoked User deleted Users password changed", +"description": "* Alerts from UserChanges bucket Rules for predefined rules which contain the below rules. Suspended user made active New user Added User suspended (by admin) User granted admin privileges User admin privileges revoked User deleted Users password changed", "id": "UserChanges", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json index 196e6d8d2a..1809c8cb50 100644 --- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json @@ -2654,7 +2654,7 @@ } } }, -"revision": "20241021", +"revision": "20241028", "rootUrl": "https://androidenterprise.googleapis.com/", "schemas": { "Administrator": { @@ -4015,7 +4015,7 @@ "properties": { "autoUpdatePolicy": { "deprecated": true, -"description": "Controls when automatic app updates on the device can be applied. Recommended alternative: autoUpdateMode which is set per app, provides greater flexibility around update frequency. When autoUpdateMode is set to AUTO_UPDATE_POSTPONED or AUTO_UPDATE_HIGH_PRIORITY, autoUpdatePolicy has no effect. \"choiceToTheUser\" allows the device's user to configure the app update policy. \"always\" enables auto updates. \"never\" disables auto updates. \"wifiOnly\" enables auto updates only when the device is connected to wifi.", +"description": "Controls when automatic app updates on the device can be applied. Recommended alternative: autoUpdateMode which is set per app, provides greater flexibility around update frequency. When autoUpdateMode is set to AUTO_UPDATE_POSTPONED or AUTO_UPDATE_HIGH_PRIORITY, autoUpdatePolicy has no effect. - choiceToTheUser allows the device's user to configure the app update policy. - always enables auto updates. - never disables auto updates. - wifiOnly enables auto updates only when the device is connected to wifi. *Important:* Changes to app update policies don't affect updates that are in progress. Any policy changes will apply to subsequent app updates. ", "enum": [ "autoUpdatePolicyUnspecified", "choiceToTheUser", diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index 01574e47a9..f4fe690903 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -4978,7 +4978,7 @@ } } }, -"revision": "20240928", +"revision": "20241025", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -9032,7 +9032,7 @@ "type": "object" }, "PySparkJob": { -"description": "A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", +"description": "A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/latest/api/python/index.html#pyspark-overview) applications on YARN.", "id": "PySparkJob", "properties": { "archiveUris": { @@ -10444,6 +10444,7 @@ "HBASE", "HIVE_WEBHCAT", "HUDI", +"ICEBERG", "JUPYTER", "PRESTO", "TRINO", @@ -10461,6 +10462,7 @@ "HBase. 
(beta)", "The Hive Web HCatalog (the REST service for accessing HCatalog).", "Hudi.", +"Iceberg.", "The Jupyter Notebook.", "The Presto query engine.", "The Trino query engine.", diff --git a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json index 355eb33d50..ba829a50c0 100644 --- a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json @@ -1324,7 +1324,7 @@ } } }, -"revision": "20240913", +"revision": "20241025", "rootUrl": "https://firebase.googleapis.com/", "schemas": { "AddFirebaseRequest": { @@ -1436,7 +1436,7 @@ "type": "string" }, "expireTime": { -"description": "Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state.", +"description": "Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -1569,7 +1569,7 @@ "type": "string" }, "expireTime": { -"description": "Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state.", +"description": "Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -1705,7 +1705,7 @@ "type": "string" }, "expireTime": { -"description": "Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state.", +"description": "Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -2020,7 +2020,7 @@ "type": "string" }, "immediate": { -"description": "Determines whether to _immediately_ delete the AndroidApp. If set to true, the App is immediately deleted from the Project and cannot be restored to the Project. If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteAndroidApp.", +"description": "Determines whether to _immediately_ delete the AndroidApp. If set to true, the App is immediately deleted from the Project and cannot be undeleted (that is, restored to the Project). If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteAndroidApp.", "type": "boolean" }, "validateOnly": { @@ -2042,7 +2042,7 @@ "type": "string" }, "immediate": { -"description": "Determines whether to _immediately_ delete the IosApp. 
If set to true, the App is immediately deleted from the Project and cannot be restored to the Project. If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteIosApp", +"description": "Determines whether to _immediately_ delete the IosApp. If set to true, the App is immediately deleted from the Project and cannot be undeleted (that is, restored to the Project). If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteIosApp", "type": "boolean" }, "validateOnly": { @@ -2064,7 +2064,7 @@ "type": "string" }, "immediate": { -"description": "Determines whether to _immediately_ delete the WebApp. If set to true, the App is immediately deleted from the Project and cannot be restored to the Project. If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteWebApp", +"description": "Determines whether to _immediately_ delete the WebApp. If set to true, the App is immediately deleted from the Project and cannot be undeleted (that is, restored to the Project). If not set, defaults to false, which means the App will be set to expire in 30 days. Within the 30 days, the App may be restored to the Project using UndeleteWebApp", "type": "boolean" }, "validateOnly": { @@ -2267,7 +2267,7 @@ "type": "string" }, "expireTime": { -"description": "Output only. Timestamp of when the App will be considered expired and cannot be undeleted. This value is only provided if the App is in the `DELETED` state.", +"description": "Output only. If the App has been removed from the Project, this is the timestamp of when the App is considered expired and will be permanently deleted. After this time, the App cannot be undeleted (that is, restored to the Project). This value is only provided if the App is in the `DELETED` state.", "format": "google-datetime", "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/iam.v1.json b/googleapiclient/discovery_cache/documents/iam.v1.json index a6c4c1960e..2caa7ebcd4 100644 --- a/googleapiclient/discovery_cache/documents/iam.v1.json +++ b/googleapiclient/discovery_cache/documents/iam.v1.json @@ -159,14 +159,14 @@ ], "parameters": { "location": { -"description": "The location of the pool to create. Format: `locations/{location}`.", +"description": "Optional. The location of the pool to create. Format: `locations/{location}`.", "location": "path", "pattern": "^locations/[^/]+$", "required": true, "type": "string" }, "workforcePoolId": { -"description": "The ID to use for the pool, which becomes the final component of the resource name. The IDs must be a globally unique string of 6 to 63 lowercase letters, digits, or hyphens. It must start with a letter, and cannot have a trailing hyphen. The prefix `gcp-` is reserved for use by Google, and may not be specified.", +"description": "Optional. The ID to use for the pool, which becomes the final component of the resource name. The IDs must be a globally unique string of 6 to 63 lowercase letters, digits, or hyphens. It must start with a letter, and cannot have a trailing hyphen. 
The prefix `gcp-` is reserved for use by Google, and may not be specified.", "location": "query", "type": "string" } @@ -3188,7 +3188,7 @@ } } }, -"revision": "20241017", +"revision": "20241028", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AccessRestrictions": { @@ -3599,14 +3599,14 @@ }, "clientSecret": { "$ref": "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret", -"description": "The optional client secret. Required to enable Authorization Code flow for web sign-in." +"description": "Optional. The optional client secret. Required to enable Authorization Code flow for web sign-in." }, "issuerUri": { "description": "Required. The OIDC issuer URI. Must be a valid URI using the `https` scheme.", "type": "string" }, "jwksJson": { -"description": "OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { \"keys\": [ { \"kty\": \"RSA/EC\", \"alg\": \"\", \"use\": \"sig\", \"kid\": \"\", \"n\": \"\", \"e\": \"\", \"x\": \"\", \"y\": \"\", \"crv\": \"\" } ] }", +"description": "Optional. OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. The JWK must use following format and include only the following fields: { \"keys\": [ { \"kty\": \"RSA/EC\", \"alg\": \"\", \"use\": \"sig\", \"kid\": \"\", \"n\": \"\", \"e\": \"\", \"x\": \"\", \"y\": \"\", \"crv\": \"\" } ] }", "type": "string" }, "webSsoConfig": { @@ -3632,7 +3632,7 @@ "id": "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue", "properties": { "plainText": { -"description": "Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response.", +"description": "Optional. Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response.", "type": "string" }, "thumbprint": { @@ -3648,7 +3648,7 @@ "id": "GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig", "properties": { "additionalScopes": { -"description": "Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured.", +"description": "Optional. Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured.", "items": { "type": "string" }, @@ -4998,15 +4998,15 @@ false "description": "Optional. Configure access restrictions on the workforce pool users. This is an optional field. If specified web sign-in can be restricted to given set of services or programmatic sign-in can be disabled for pool users." 
}, "description": { -"description": "A user-specified description of the pool. Cannot exceed 256 characters.", +"description": "Optional. A user-specified description of the pool. Cannot exceed 256 characters.", "type": "string" }, "disabled": { -"description": "Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again.", +"description": "Optional. Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again.", "type": "boolean" }, "displayName": { -"description": "A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters.", +"description": "Optional. A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters.", "type": "string" }, "expireTime": { @@ -5025,7 +5025,7 @@ false "type": "string" }, "sessionDuration": { -"description": "Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion.", +"description": "Optional. Duration that the Google Cloud access tokens, console sign-in sessions, and `gcloud` sign-in sessions from this pool are valid. Must be greater than 15 minutes (900s) and less than 12 hours (43200s). If `session_duration` is not configured, minted credentials have a default duration of one hour (3600s). For SAML providers, the lifetime of the token is the minimum of the `session_duration` and the `SessionNotOnOrAfter` claim in the SAML assertion.", "format": "google-duration", "type": "string" }, @@ -5052,7 +5052,7 @@ false "id": "WorkforcePoolProvider", "properties": { "attributeCondition": { -"description": "A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` \"'admins' in google.groups\" ```", +"description": "Optional. A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. 
The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` \"'admins' in google.groups\" ```", "type": "string" }, "attributeMapping": { @@ -5063,15 +5063,15 @@ false "type": "object" }, "description": { -"description": "A user-specified description of the provider. Cannot exceed 256 characters.", +"description": "Optional. A user-specified description of the provider. Cannot exceed 256 characters.", "type": "string" }, "disabled": { -"description": "Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access.", +"description": "Optional. Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access.", "type": "boolean" }, "displayName": { -"description": "A user-specified display name for the provider. Cannot exceed 32 characters.", +"description": "Optional. A user-specified display name for the provider. Cannot exceed 32 characters.", "type": "string" }, "expireTime": { diff --git a/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json b/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json index 66989a4e77..45bfd5f698 100644 --- a/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json +++ b/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json @@ -1417,7 +1417,7 @@ } } }, -"revision": "20241023", +"revision": "20241028", "rootUrl": "https://merchantapi.googleapis.com/", "schemas": { "Accepted": { @@ -1714,7 +1714,7 @@ "type": "object" }, "CarrierRate": { -"description": "A list of carrier rates that can be referred to by `main_table` or `single_value`.", +"description": "A list of carrier rates that can be referred to by `main_table` or `single_value`. Supported carrier services are defined in https://support.google.com/merchants/answer/12577710?hl=en&ref_topic=12570808&sjid=10662598224319463032-NC#zippy=%2Cdelivery-cost-rate-type%2Ccarrier-rate-au-de-uk-and-us-only.", "id": "CarrierRate", "properties": { "carrier": { @@ -3455,7 +3455,7 @@ false "id": "WarehouseBasedDeliveryTime", "properties": { "carrier": { -"description": "Required. Carrier, such as `\"UPS\"` or `\"Fedex\"`.", +"description": "Required. Carrier, such as `\"UPS\"` or `\"Fedex\"`. 
[supported carriers](https://support.google.com/merchants/answer/7050921#zippy=%2Ccarrier-rates-au-de-uk-and-us-only)", "type": "string" }, "carrierService": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v2beta.json b/googleapiclient/discovery_cache/documents/osconfig.v2beta.json new file mode 100644 index 0000000000..5c8345bd6c --- /dev/null +++ b/googleapiclient/discovery_cache/documents/osconfig.v2beta.json @@ -0,0 +1,2274 @@ +{ +"auth": { +"oauth2": { +"scopes": { +"https://www.googleapis.com/auth/cloud-platform": { +"description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." +} +} +} +}, +"basePath": "", +"baseUrl": "https://osconfig.googleapis.com/", +"batchPath": "batch", +"canonicalName": "OS Config", +"description": "OS management tools that can be used for patch management, patch compliance, and configuration management on VM instances.", +"discoveryVersion": "v1", +"documentationLink": "https://cloud.google.com/compute/docs/osconfig/rest", +"fullyEncodeReservedExpansion": true, +"icons": { +"x16": "http://www.google.com/images/icons/product/search-16.gif", +"x32": "http://www.google.com/images/icons/product/search-32.gif" +}, +"id": "osconfig:v2beta", +"kind": "discovery#restDescription", +"mtlsRootUrl": "https://osconfig.mtls.googleapis.com/", +"name": "osconfig", +"ownerDomain": "google.com", +"ownerName": "Google", +"parameters": { +"$.xgafv": { +"description": "V1 error format.", +"enum": [ +"1", +"2" +], +"enumDescriptions": [ +"v1 error format", +"v2 error format" +], +"location": "query", +"type": "string" +}, +"access_token": { +"description": "OAuth access token.", +"location": "query", +"type": "string" +}, +"alt": { +"default": "json", +"description": "Data format for response.", +"enum": [ +"json", +"media", +"proto" +], +"enumDescriptions": [ +"Responses with Content-Type of application/json", +"Media download with context-dependent Content-Type", +"Responses with Content-Type of application/x-protobuf" +], +"location": "query", +"type": "string" +}, +"callback": { +"description": "JSONP", +"location": "query", +"type": "string" +}, +"fields": { +"description": "Selector specifying which fields to include in a partial response.", +"location": "query", +"type": "string" +}, +"key": { +"description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", +"location": "query", +"type": "string" +}, +"oauth_token": { +"description": "OAuth 2.0 token for the current user.", +"location": "query", +"type": "string" +}, +"prettyPrint": { +"default": "true", +"description": "Returns response with indentations and line breaks.", +"location": "query", +"type": "boolean" +}, +"quotaUser": { +"description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", +"location": "query", +"type": "string" +}, +"uploadType": { +"description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", +"location": "query", +"type": "string" +}, +"upload_protocol": { +"description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", +"location": "query", +"type": "string" +} +}, +"protocol": "rest", +"resources": { +"folders": { +"resources": { +"locations": { +"resources": { +"global": { +"resources": { +"policyOrchestrators": { +"methods": { +"create": { +"description": "Creates a new policy orchestrator under the given folder resource. `name` field of the given orchestrator are ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. Orchestrator state field might be only set to `ACTIVE`, `STOPPED` or omitted (in which case, the created resource will be in `ACTIVE` state anyway).", +"flatPath": "v2beta/folders/{foldersId}/locations/global/policyOrchestrators", +"httpMethod": "POST", +"id": "osconfig.folders.locations.global.policyOrchestrators.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The parent resource name in the form of: `organizations/{organization_id}/locations/global` `folders/{folder_id}/locations/global` 'projects/{project_id_or_number}/locations/global'", +"location": "path", +"pattern": "^folders/[^/]+/locations/global$", +"required": true, +"type": "string" +}, +"policyOrchestratorId": { +"description": "Required. The logical identifier of the policy orchestrator, with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the parent.", +"location": "query", +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+parent}/policyOrchestrators", +"request": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes an existing policy orchestrator resource, parented by a folder.", +"flatPath": "v2beta/folders/{foldersId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "DELETE", +"id": "osconfig.folders.locations.global.policyOrchestrators.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"etag": { +"description": "Optional. The current etag of the policy orchestrator. If an etag is provided and does not match the current etag of the policy orchestrator, deletion will be blocked and an ABORTED error will be returned.", +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. Name of the resource to be deleted.", +"location": "path", +"pattern": "^folders/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. 
An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Retrieves an existing policy orchestrator, parented by a folder.", +"flatPath": "v2beta/folders/{foldersId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "GET", +"id": "osconfig.folders.locations.global.policyOrchestrators.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name.", +"location": "path", +"pattern": "^folders/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists the policy orchestrators under the given parent folder resource.", +"flatPath": "v2beta/folders/{foldersId}/locations/global/policyOrchestrators", +"httpMethod": "GET", +"id": "osconfig.folders.locations.global.policyOrchestrators.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. Filtering results", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Optional. Hint for how to order the results", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. Requested page size. Server may return fewer items than requested. If unspecified, server will pick an appropriate default.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A token identifying a page of results the server should return.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource name.", +"location": "path", +"pattern": "^folders/[^/]+/locations/global$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+parent}/policyOrchestrators", +"response": { +"$ref": "GoogleCloudOsconfigV2beta__ListPolicyOrchestratorsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates an existing policy orchestrator, parented by a folder.", +"flatPath": "v2beta/folders/{foldersId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "PATCH", +"id": "osconfig.folders.locations.global.policyOrchestrators.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Immutable. Identifier. 
In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`", +"location": "path", +"pattern": "^folders/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}", +"request": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"flatPath": "v2beta/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "osconfig.folders.locations.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}:cancel", +"request": { +"$ref": "CancelOperationRequest" +}, +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v2beta/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "osconfig.folders.locations.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v2beta/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "osconfig.folders.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v2beta/folders/{foldersId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "osconfig.folders.locations.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}/operations", +"response": { +"$ref": "ListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +}, +"organizations": { +"resources": { +"locations": { +"resources": { +"global": { +"resources": { +"policyOrchestrators": { +"methods": { +"create": { +"description": "Creates a new policy orchestrator under the given organizations resource. `name` field of the given orchestrator are ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. Orchestrator state field might be only set to `ACTIVE`, `STOPPED` or omitted (in which case, the created resource will be in `ACTIVE` state anyway).", +"flatPath": "v2beta/organizations/{organizationsId}/locations/global/policyOrchestrators", +"httpMethod": "POST", +"id": "osconfig.organizations.locations.global.policyOrchestrators.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The parent resource name in the form of: `organizations/{organization_id}/locations/global` `folders/{folder_id}/locations/global` 'projects/{project_id_or_number}/locations/global'", +"location": "path", +"pattern": "^organizations/[^/]+/locations/global$", +"required": true, +"type": "string" +}, +"policyOrchestratorId": { +"description": "Required. The logical identifier of the policy orchestrator, with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the parent.", +"location": "query", +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+parent}/policyOrchestrators", +"request": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes an existing policy orchestrator resource, parented by an organization.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "DELETE", +"id": "osconfig.organizations.locations.global.policyOrchestrators.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"etag": { +"description": "Optional. The current etag of the policy orchestrator. If an etag is provided and does not match the current etag of the policy orchestrator, deletion will be blocked and an ABORTED error will be returned.", +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. Name of the resource to be deleted.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Retrieves an existing policy orchestrator, parented by an organization.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "GET", +"id": "osconfig.organizations.locations.global.policyOrchestrators.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The resource name.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists the policy orchestrators under the given parent organization resource.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/global/policyOrchestrators", +"httpMethod": "GET", +"id": "osconfig.organizations.locations.global.policyOrchestrators.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. Filtering results", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Optional. Hint for how to order the results", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. Requested page size. Server may return fewer items than requested. If unspecified, server will pick an appropriate default.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A token identifying a page of results the server should return.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource name.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/global$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+parent}/policyOrchestrators", +"response": { +"$ref": "GoogleCloudOsconfigV2beta__ListPolicyOrchestratorsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates an existing policy orchestrator, parented by an organization.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "PATCH", +"id": "osconfig.organizations.locations.global.policyOrchestrators.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`", +"location": "path", +"pattern": "^organizations/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}", +"request": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "osconfig.organizations.locations.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}:cancel", +"request": { +"$ref": "CancelOperationRequest" +}, +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "osconfig.organizations.locations.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "osconfig.organizations.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v2beta/organizations/{organizationsId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "osconfig.organizations.locations.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}/operations", +"response": { +"$ref": "ListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +}, +"projects": { +"resources": { +"locations": { +"resources": { +"global": { +"resources": { +"policyOrchestrators": { +"methods": { +"create": { +"description": "Creates a new policy orchestrator under the given project resource. `name` field of the given orchestrator are ignored and instead replaced by a product of `parent` and `policy_orchestrator_id`. Orchestrator state field might be only set to `ACTIVE`, `STOPPED` or omitted (in which case, the created resource will be in `ACTIVE` state anyway).", +"flatPath": "v2beta/projects/{projectsId}/locations/global/policyOrchestrators", +"httpMethod": "POST", +"id": "osconfig.projects.locations.global.policyOrchestrators.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The parent resource name in the form of: `organizations/{organization_id}/locations/global` `folders/{folder_id}/locations/global` 'projects/{project_id_or_number}/locations/global'", +"location": "path", +"pattern": "^projects/[^/]+/locations/global$", +"required": true, +"type": "string" +}, +"policyOrchestratorId": { +"description": "Required. The logical identifier of the policy orchestrator, with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the parent.", +"location": "query", +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+parent}/policyOrchestrators", +"request": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes an existing policy orchestrator resource, parented by a project.", +"flatPath": "v2beta/projects/{projectsId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "DELETE", +"id": "osconfig.projects.locations.global.policyOrchestrators.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"etag": { +"description": "Optional. The current etag of the policy orchestrator. If an etag is provided and does not match the current etag of the policy orchestrator, deletion will be blocked and an ABORTED error will be returned.", +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. Name of the resource to be deleted.", +"location": "path", +"pattern": "^projects/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Retrieves an existing policy orchestrator, parented by a project.", +"flatPath": "v2beta/projects/{projectsId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "GET", +"id": "osconfig.projects.locations.global.policyOrchestrators.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name.", +"location": "path", +"pattern": "^projects/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists the policy orchestrators under the given parent project resource.", +"flatPath": "v2beta/projects/{projectsId}/locations/global/policyOrchestrators", +"httpMethod": "GET", +"id": "osconfig.projects.locations.global.policyOrchestrators.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. Filtering results", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Optional. Hint for how to order the results", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. 
Requested page size. Server may return fewer items than requested. If unspecified, server will pick an appropriate default.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A token identifying a page of results the server should return.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource name.", +"location": "path", +"pattern": "^projects/[^/]+/locations/global$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+parent}/policyOrchestrators", +"response": { +"$ref": "GoogleCloudOsconfigV2beta__ListPolicyOrchestratorsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates an existing policy orchestrator, parented by a project.", +"flatPath": "v2beta/projects/{projectsId}/locations/global/policyOrchestrators/{policyOrchestratorsId}", +"httpMethod": "PATCH", +"id": "osconfig.projects.locations.global.policyOrchestrators.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/global/policyOrchestrators/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}", +"request": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"flatPath": "v2beta/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "osconfig.projects.locations.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}:cancel", +"request": { +"$ref": "CancelOperationRequest" +}, +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v2beta/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "osconfig.projects.locations.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v2beta/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "osconfig.projects.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2beta/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v2beta/projects/{projectsId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "osconfig.projects.locations.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v2beta/{+name}/operations", +"response": { +"$ref": "ListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +} +}, +"revision": "20241028", +"rootUrl": "https://osconfig.googleapis.com/", +"schemas": { +"CancelOperationRequest": { +"description": "The request message for Operations.CancelOperation.", +"id": "CancelOperationRequest", +"properties": {}, +"type": "object" +}, +"Empty": { +"description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", +"id": "Empty", +"properties": {}, +"type": "object" +}, +"FixedOrPercent": { +"description": "Message encapsulating a value that can be either absolute (\"fixed\") or relative (\"percent\") to a value.", +"id": "FixedOrPercent", +"properties": { +"fixed": { +"description": "Specifies a fixed value.", +"format": "int32", +"type": "integer" +}, +"percent": { +"description": "Specifies the relative value defined as a percentage, which will be multiplied by a reference value.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV1__OSPolicyAssignmentOperationMetadata": { +"description": "OS policy assignment operation metadata provided by OS policy assignment API methods that return long running operations.", +"id": "GoogleCloudOsconfigV1__OSPolicyAssignmentOperationMetadata", +"properties": { +"apiMethod": { +"description": "The OS policy assignment API method.", +"enum": [ +"API_METHOD_UNSPECIFIED", +"CREATE", +"UPDATE", +"DELETE" +], +"enumDescriptions": [ +"Invalid value", +"Create OS policy assignment API method", +"Update OS policy assignment API method", +"Delete OS policy assignment API method" +], +"type": "string" +}, +"osPolicyAssignment": { +"description": "Reference to the `OSPolicyAssignment` API resource. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id@revision_id}`", +"type": "string" +}, +"rolloutStartTime": { +"description": "Rollout start time", +"format": "google-datetime", +"type": "string" +}, +"rolloutState": { +"description": "State of the rollout", +"enum": [ +"ROLLOUT_STATE_UNSPECIFIED", +"IN_PROGRESS", +"CANCELLING", +"CANCELLED", +"SUCCEEDED" +], +"enumDescriptions": [ +"Invalid value", +"The rollout is in progress.", +"The rollout is being cancelled.", +"The rollout is cancelled.", +"The rollout has completed successfully." +], +"type": "string" +}, +"rolloutUpdateTime": { +"description": "Rollout update time", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta_OrchestrationScope_LocationSelector": { +"description": "Selector containing locations in scope.", +"id": "GoogleCloudOsconfigV2beta_OrchestrationScope_LocationSelector", +"properties": { +"includedLocations": { +"description": "Optional. Names of the locations in scope. Format: `us-central1-a`", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta_OrchestrationScope_ResourceHierarchySelector": { +"description": "Selector containing Cloud Resource Manager resource hierarchy nodes.", +"id": "GoogleCloudOsconfigV2beta_OrchestrationScope_ResourceHierarchySelector", +"properties": { +"includedFolders": { +"description": "Optional. Names of the folders in scope. Format: `folders/{folder_id}`", +"items": { +"type": "string" +}, +"type": "array" +}, +"includedProjects": { +"description": "Optional. Names of the projects in scope. Format: `projects/{project_number}`", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta_OrchestrationScope_Selector": { +"description": "Selector for the resources in scope of orchestration.", +"id": "GoogleCloudOsconfigV2beta_OrchestrationScope_Selector", +"properties": { +"locationSelector": { +"$ref": "GoogleCloudOsconfigV2beta_OrchestrationScope_LocationSelector", +"description": "Selector for selecting locations." 
+}, +"resourceHierarchySelector": { +"$ref": "GoogleCloudOsconfigV2beta_OrchestrationScope_ResourceHierarchySelector", +"description": "Selector for selecting resource hierarchy." +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta_PolicyOrchestrator_IterationState": { +"description": "Describes the state of a single iteration of the orchestrator.", +"id": "GoogleCloudOsconfigV2beta_PolicyOrchestrator_IterationState", +"properties": { +"error": { +"$ref": "Status", +"description": "Output only. Error thrown in the wave iteration.", +"readOnly": true +}, +"failedActions": { +"description": "Output only. Number of orchestration actions which failed so far. For more details, query the Cloud Logs.", +"format": "int64", +"readOnly": true, +"type": "string" +}, +"finishTime": { +"description": "Output only. Finish time of the wave iteration.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"performedActions": { +"description": "Output only. Overall number of actions done by the orchestrator so far.", +"format": "int64", +"readOnly": true, +"type": "string" +}, +"progress": { +"description": "Output only. An estimated percentage of the progress. Number between 0 and 100.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"rolloutResource": { +"description": "Output only. Handle to the Progressive Rollouts API rollout resource, which contains detailed information about a particular orchestration iteration.", +"readOnly": true, +"type": "string" +}, +"startTime": { +"description": "Output only. Start time of the wave iteration.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"state": { +"description": "Output only. State of the iteration.", +"enum": [ +"STATE_UNSPECIFIED", +"PROCESSING", +"COMPLETED", +"FAILED", +"CANCELLED", +"UNKNOWN" +], +"enumDescriptions": [ +"Default value. This value is unused.", +"Iteration is in progress.", +"Iteration completed, with all actions being successful.", +"Iteration completed, with failures.", +"Iteration was explicitly cancelled.", +"Impossible to determine current state of the iteration." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta_PolicyOrchestrator_OrchestrationState": { +"description": "Describes the state of the orchestration process.", +"id": "GoogleCloudOsconfigV2beta_PolicyOrchestrator_OrchestrationState", +"properties": { +"currentIterationState": { +"$ref": "GoogleCloudOsconfigV2beta_PolicyOrchestrator_IterationState", +"description": "Output only. Current Wave iteration state.", +"readOnly": true +}, +"previousIterationState": { +"$ref": "GoogleCloudOsconfigV2beta_PolicyOrchestrator_IterationState", +"description": "Output only. Previous Wave iteration state.", +"readOnly": true +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta__ListPolicyOrchestratorsResponse": { +"description": "Response for the list policy orchestrator resources.", +"id": "GoogleCloudOsconfigV2beta__ListPolicyOrchestratorsResponse", +"properties": { +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", +"type": "string" +}, +"policyOrchestrators": { +"description": "The policy orchestrators for the specified parent resource.", +"items": { +"$ref": "GoogleCloudOsconfigV2beta__PolicyOrchestrator" +}, +"type": "array" +}, +"unreachable": { +"description": "Locations that could not be reached.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta__OperationMetadata": { +"description": "Represents the metadata of the long-running operation.", +"id": "GoogleCloudOsconfigV2beta__OperationMetadata", +"properties": { +"apiVersion": { +"description": "Output only. API version used to start the operation.", +"readOnly": true, +"type": "string" +}, +"createTime": { +"description": "Output only. The time the operation was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"endTime": { +"description": "Output only. The time the operation finished running.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"requestedCancellation": { +"description": "Output only. Identifies whether the user has requested cancellation of the operation. Operations that have been cancelled successfully have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"readOnly": true, +"type": "boolean" +}, +"statusMessage": { +"description": "Output only. Human-readable status of the operation, if any.", +"readOnly": true, +"type": "string" +}, +"target": { +"description": "Output only. Server-defined resource path for the target of the operation.", +"readOnly": true, +"type": "string" +}, +"verb": { +"description": "Output only. Name of the verb executed by the operation.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta__OrchestratedResource": { +"description": "Represents a resource that is being orchestrated by the policy orchestrator.", +"id": "GoogleCloudOsconfigV2beta__OrchestratedResource", +"properties": { +"id": { +"description": "Optional. ID of the resource to be used while generating set of affected resources. For UPSERT action the value is auto-generated during PolicyOrchestrator creation when not set. When the value is set it should following next restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the project. For DELETE action, ID must be specified explicitly during PolicyOrchestrator creation.", +"type": "string" +}, +"osPolicyAssignmentV1Payload": { +"$ref": "OSPolicyAssignment", +"description": "Optional. OSPolicyAssignment resource to be created, updated or deleted. Name field is ignored and replace with a generated value. With this field set, orchestrator will perform actions on `project/{project}/locations/{zone}/osPolicyAssignments/{resource_id}` resources, where `project` and `zone` pairs come from the expanded scope, and `resource_id` comes from the `resource_id` field of orchestrator resource." +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta__OrchestrationScope": { +"description": "Defines a set of selectors which drive which resources are in scope of policy orchestration.", +"id": "GoogleCloudOsconfigV2beta__OrchestrationScope", +"properties": { +"selectors": { +"description": "Optional. Selectors of the orchestration scope. There is a logical AND between each selector defined. 
When there is no explicit `ResourceHierarchySelector` selector specified, the scope is by default bounded to the parent of the policy orchestrator resource.", +"items": { +"$ref": "GoogleCloudOsconfigV2beta_OrchestrationScope_Selector" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudOsconfigV2beta__PolicyOrchestrator": { +"description": "PolicyOrchestrator helps managing project+zone level policy resources (e.g. OS Policy Assignments), by providing tools to create, update and delete them across projects and locations, at scale. Policy orchestrator functions as an endless loop. Each iteration orchestrator computes a set of resources that should be affected, then progressively applies changes to them. If for some reason this set of resources changes over time (e.g. new projects are added), the future loop iterations will address that. Orchestrator can either upsert or delete policy resources. For more details, see the description of the `action`, and `orchestrated_resource` fields. Note that policy orchestrator do not \"manage\" the resources it creates. Every iteration is independent and only minimal history of past actions is retained (apart from Cloud Logging). If orchestrator gets deleted, it does not affect the resources it created in the past. Those will remain where they were. Same applies if projects are removed from the orchestrator's scope.", +"id": "GoogleCloudOsconfigV2beta__PolicyOrchestrator", +"properties": { +"action": { +"description": "Required. Action to be done by the orchestrator in `projects/{project_id}/zones/{zone_id}` locations defined by the `orchestration_scope`. Allowed values: - `UPSERT` - Orchestrator will create or update target resources. - `DELETE` - Orchestrator will delete target resources, if they exist", +"type": "string" +}, +"createTime": { +"description": "Output only. Timestamp when the policy orchestrator resource was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. Freeform text describing the purpose of the resource.", +"type": "string" +}, +"etag": { +"description": "Output only. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", +"readOnly": true, +"type": "string" +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. Labels as key value pairs", +"type": "object" +}, +"name": { +"description": "Immutable. Identifier. In form of * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}`", +"type": "string" +}, +"orchestratedResource": { +"$ref": "GoogleCloudOsconfigV2beta__OrchestratedResource", +"description": "Required. Resource to be orchestrated by the policy orchestrator." +}, +"orchestrationScope": { +"$ref": "GoogleCloudOsconfigV2beta__OrchestrationScope", +"description": "Optional. Defines scope for the orchestration, in context of the enclosing PolicyOrchestrator resource. Scope is expanded into a list of pairs, in which the rollout action will take place. Expansion starts with a Folder resource parenting the PolicyOrchestrator resource: - All the descendant projects are listed. - List of project is cross joined with a list of all available zones. 
- Resulting list of pairs is filtered according to the selectors." +}, +"orchestrationState": { +"$ref": "GoogleCloudOsconfigV2beta_PolicyOrchestrator_OrchestrationState", +"description": "Output only. State of the orchestration.", +"readOnly": true +}, +"reconciling": { +"description": "Output only. Set to true, if the there are ongoing changes being applied by the orchestrator.", +"readOnly": true, +"type": "boolean" +}, +"state": { +"description": "Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. Allowed values: - `ACTIVE` - orchestrator is actively looking for actions to be taken. - `STOPPED` - orchestrator won't make any changes. Note: There might be more states added in the future. We use string here instead of an enum, to avoid the need of propagating new states to all the client code.", +"type": "string" +}, +"updateTime": { +"description": "Output only. Timestamp when the policy orchestrator resource was last modified.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"ListOperationsResponse": { +"description": "The response message for Operations.ListOperations.", +"id": "ListOperationsResponse", +"properties": { +"nextPageToken": { +"description": "The standard List next-page token.", +"type": "string" +}, +"operations": { +"description": "A list of operations that matches the specified filter in the request.", +"items": { +"$ref": "Operation" +}, +"type": "array" +} +}, +"type": "object" +}, +"OSPolicy": { +"description": "An OS policy defines the desired state configuration for a VM.", +"id": "OSPolicy", +"properties": { +"allowNoResourceGroupMatch": { +"description": "This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.", +"type": "boolean" +}, +"description": { +"description": "Policy description. Length of the description is limited to 1024 characters.", +"type": "string" +}, +"id": { +"description": "Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.", +"type": "string" +}, +"mode": { +"description": "Required. Policy mode", +"enum": [ +"MODE_UNSPECIFIED", +"VALIDATION", +"ENFORCEMENT" +], +"enumDescriptions": [ +"Invalid mode", +"This mode checks if the configuration resources in the policy are in their desired state. No actions are performed if they are not in the desired state. This mode is used for reporting purposes.", +"This mode checks if the configuration resources in the policy are in their desired state, and if not, enforces the desired state." +], +"type": "string" +}, +"resourceGroups": { +"description": "Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. 
This behavior can be toggled by the flag `allow_no_resource_group_match`", +"items": { +"$ref": "OSPolicyResourceGroup" +}, +"type": "array" +} +}, +"type": "object" +}, +"OSPolicyAssignment": { +"description": "OS policy assignment is an API resource that is used to apply a set of OS policies to a dynamically targeted group of Compute Engine VM instances. An OS policy is used to define the desired state configuration for a Compute Engine VM instance through a set of configuration resources that provide capabilities such as installing or removing software packages, or executing a script. For more information about the OS policy resource definitions and examples, see [OS policy and OS policy assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies).", +"id": "OSPolicyAssignment", +"properties": { +"baseline": { +"description": "Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.", +"readOnly": true, +"type": "boolean" +}, +"deleted": { +"description": "Output only. Indicates that this revision deletes the OS policy assignment.", +"readOnly": true, +"type": "boolean" +}, +"description": { +"description": "OS policy assignment description. Length of the description is limited to 1024 characters.", +"type": "string" +}, +"etag": { +"description": "The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.", +"type": "string" +}, +"instanceFilter": { +"$ref": "OSPolicyAssignmentInstanceFilter", +"description": "Required. Filter to select VMs." +}, +"name": { +"description": "Resource name. Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id}` This field is ignored when you create an OS policy assignment.", +"type": "string" +}, +"osPolicies": { +"description": "Required. List of OS policies to be applied to the VMs.", +"items": { +"$ref": "OSPolicy" +}, +"type": "array" +}, +"reconciling": { +"description": "Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING", +"readOnly": true, +"type": "boolean" +}, +"revisionCreateTime": { +"description": "Output only. The timestamp that the revision was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"revisionId": { +"description": "Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment", +"readOnly": true, +"type": "string" +}, +"rollout": { +"$ref": "OSPolicyAssignmentRollout", +"description": "Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted." +}, +"rolloutState": { +"description": "Output only. OS policy assignment rollout state", +"enum": [ +"ROLLOUT_STATE_UNSPECIFIED", +"IN_PROGRESS", +"CANCELLING", +"CANCELLED", +"SUCCEEDED" +], +"enumDescriptions": [ +"Invalid value", +"The rollout is in progress.", +"The rollout is being cancelled.", +"The rollout is cancelled.", +"The rollout has completed successfully." 
+], +"readOnly": true, +"type": "string" +}, +"uid": { +"description": "Output only. Server generated unique id for the OS policy assignment resource.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyAssignmentInstanceFilter": { +"description": "Filters to select target VMs for an assignment. If more than one filter criteria is specified below, a VM will be selected if and only if it satisfies all of them.", +"id": "OSPolicyAssignmentInstanceFilter", +"properties": { +"all": { +"description": "Target all VMs in the project. If true, no other criteria is permitted.", +"type": "boolean" +}, +"exclusionLabels": { +"description": "List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.", +"items": { +"$ref": "OSPolicyAssignmentLabelSet" +}, +"type": "array" +}, +"inclusionLabels": { +"description": "List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.", +"items": { +"$ref": "OSPolicyAssignmentLabelSet" +}, +"type": "array" +}, +"inventories": { +"description": "List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.", +"items": { +"$ref": "OSPolicyAssignmentInstanceFilterInventory" +}, +"type": "array" +} +}, +"type": "object" +}, +"OSPolicyAssignmentInstanceFilterInventory": { +"description": "VM inventory details.", +"id": "OSPolicyAssignmentInstanceFilterInventory", +"properties": { +"osShortName": { +"description": "Required. The OS short name", +"type": "string" +}, +"osVersion": { +"description": "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyAssignmentLabelSet": { +"description": "Message representing label set. * A label is a key value pair set for a VM. * A LabelSet is a set of labels. * Labels within a LabelSet are ANDed. In other words, a LabelSet is applicable for a VM only if it matches all the labels in the LabelSet. * Example: A LabelSet with 2 labels: `env=prod` and `type=webserver` will only be applicable for those VMs with both labels present.", +"id": "OSPolicyAssignmentLabelSet", +"properties": { +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", +"type": "object" +} +}, +"type": "object" +}, +"OSPolicyAssignmentOperationMetadata": { +"description": "OS policy assignment operation metadata provided by OS policy assignment API methods that return long running operations.", +"id": "OSPolicyAssignmentOperationMetadata", +"properties": { +"apiMethod": { +"description": "The OS policy assignment API method.", +"enum": [ +"API_METHOD_UNSPECIFIED", +"CREATE", +"UPDATE", +"DELETE" +], +"enumDescriptions": [ +"Invalid value", +"Create OS policy assignment API method", +"Update OS policy assignment API method", +"Delete OS policy assignment API method" +], +"type": "string" +}, +"osPolicyAssignment": { +"description": "Reference to the `OSPolicyAssignment` API resource. 
Format: `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id@revision_id}`", +"type": "string" +}, +"rolloutStartTime": { +"description": "Rollout start time", +"format": "google-datetime", +"type": "string" +}, +"rolloutState": { +"description": "State of the rollout", +"enum": [ +"ROLLOUT_STATE_UNSPECIFIED", +"IN_PROGRESS", +"CANCELLING", +"CANCELLED", +"SUCCEEDED" +], +"enumDescriptions": [ +"Invalid value", +"The rollout is in progress.", +"The rollout is being cancelled.", +"The rollout is cancelled.", +"The rollout has completed successfully." +], +"type": "string" +}, +"rolloutUpdateTime": { +"description": "Rollout update time", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyAssignmentRollout": { +"description": "Message to configure the rollout at the zonal level for the OS policy assignment.", +"id": "OSPolicyAssignmentRollout", +"properties": { +"disruptionBudget": { +"$ref": "FixedOrPercent", +"description": "Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment." +}, +"minWaitDuration": { +"description": "Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyInventoryFilter": { +"description": "Filtering criteria to select VMs based on inventory details.", +"id": "OSPolicyInventoryFilter", +"properties": { +"osShortName": { +"description": "Required. The OS short name", +"type": "string" +}, +"osVersion": { +"description": "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResource": { +"description": "An OS policy resource is used to define the desired state configuration and provides a specific functionality like installing/removing packages, executing a script etc. The system ensures that resources are always in their desired state by taking necessary actions if they have drifted from their desired state.", +"id": "OSPolicyResource", +"properties": { +"exec": { +"$ref": "OSPolicyResourceExecResource", +"description": "Exec resource" +}, +"file": { +"$ref": "OSPolicyResourceFileResource", +"description": "File resource" +}, +"id": { +"description": "Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.", +"type": "string" +}, +"pkg": { +"$ref": "OSPolicyResourcePackageResource", +"description": "Package resource" +}, +"repository": { +"$ref": "OSPolicyResourceRepositoryResource", +"description": "Package repository resource" +} +}, +"type": "object" +}, +"OSPolicyResourceExecResource": { +"description": "A resource that allows executing scripts on the VM. The `ExecResource` has 2 stages: `validate` and `enforce` and both stages accept a script as an argument to execute. When the `ExecResource` is applied by the agent, it first executes the script in the `validate` stage. 
The `validate` stage can signal that the `ExecResource` is already in the desired state by returning an exit code of `100`. If the `ExecResource` is not in the desired state, it should return an exit code of `101`. Any other exit code returned by this stage is considered an error. If the `ExecResource` is not in the desired state based on the exit code from the `validate` stage, the agent proceeds to execute the script from the `enforce` stage. If the `ExecResource` is already in the desired state, the `enforce` stage will not be run. Similar to the `validate` stage, the `enforce` stage should return an exit code of `100` to indicate that the resource is now in its desired state. Any other exit code is considered an error. NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to have an explicit indicator of `in desired state`, `not in desired state` and errors. Because, for example, PowerShell will always return an exit code of `0` unless an `exit` statement is provided in the script. So, for reasons of consistency and being explicit, exit codes `100` and `101` were chosen.", +"id": "OSPolicyResourceExecResource", +"properties": { +"enforce": { +"$ref": "OSPolicyResourceExecResourceExec", +"description": "What to run to bring this resource into the desired state. An exit code of 100 indicates \"success\", any other exit code indicates a failure running enforce." +}, +"validate": { +"$ref": "OSPolicyResourceExecResourceExec", +"description": "Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates \"in desired state\", and an exit code of 101 indicates \"not in desired state\". Any other exit code indicates a failure running validate." +} +}, +"type": "object" +}, +"OSPolicyResourceExecResourceExec": { +"description": "A file or script to execute.", +"id": "OSPolicyResourceExecResourceExec", +"properties": { +"args": { +"description": "Optional arguments to pass to the source during execution.", +"items": { +"type": "string" +}, +"type": "array" +}, +"file": { +"$ref": "OSPolicyResourceFile", +"description": "A remote or local file." +}, +"interpreter": { +"description": "Required. The script interpreter to use.", +"enum": [ +"INTERPRETER_UNSPECIFIED", +"NONE", +"SHELL", +"POWERSHELL" +], +"enumDescriptions": [ +"Invalid value, the request will return a validation error.", +"If an interpreter is not specified, the source is executed directly. This execution, without an interpreter, only succeeds for executables and scripts that have shebang lines.", +"Indicates that the script runs with `/bin/sh` on Linux and `cmd.exe` on Windows.", +"Indicates that the script runs with PowerShell." +], +"type": "string" +}, +"outputFilePath": { +"description": "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 500K bytes.", +"type": "string" +}, +"script": { +"description": "An inline script. The size of the script is limited to 32KiB.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceFile": { +"description": "A remote or local file.", +"id": "OSPolicyResourceFile", +"properties": { +"allowInsecure": { +"description": "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified.
Cloud Storage: An object generation number must be specified.", +"type": "boolean" +}, +"gcs": { +"$ref": "OSPolicyResourceFileGcs", +"description": "A Cloud Storage object." +}, +"localPath": { +"description": "A local path within the VM to use.", +"type": "string" +}, +"remote": { +"$ref": "OSPolicyResourceFileRemote", +"description": "A generic remote file." +} +}, +"type": "object" +}, +"OSPolicyResourceFileGcs": { +"description": "Specifies a file available as a Cloud Storage Object.", +"id": "OSPolicyResourceFileGcs", +"properties": { +"bucket": { +"description": "Required. Bucket of the Cloud Storage object.", +"type": "string" +}, +"generation": { +"description": "Generation number of the Cloud Storage object.", +"format": "int64", +"type": "string" +}, +"object": { +"description": "Required. Name of the Cloud Storage object.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceFileRemote": { +"description": "Specifies a file available via some URI.", +"id": "OSPolicyResourceFileRemote", +"properties": { +"sha256Checksum": { +"description": "SHA256 checksum of the remote file.", +"type": "string" +}, +"uri": { +"description": "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceFileResource": { +"description": "A resource that manages the state of a file.", +"id": "OSPolicyResourceFileResource", +"properties": { +"content": { +"description": "A file with this content. The size of the content is limited to 32KiB.", +"type": "string" +}, +"file": { +"$ref": "OSPolicyResourceFile", +"description": "A remote or local source." +}, +"path": { +"description": "Required. The absolute path of the file within the VM.", +"type": "string" +}, +"permissions": { +"description": "Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the Linux chmod utility). Each digit represents a three-bit number, with the 4 bit corresponding to the read permission, the 2 bit corresponding to the write permission, and the 1 bit corresponding to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4", +"type": "string" +}, +"state": { +"description": "Required. Desired state of the file.", +"enum": [ +"DESIRED_STATE_UNSPECIFIED", +"PRESENT", +"ABSENT", +"CONTENTS_MATCH" +], +"enumDescriptions": [ +"Unspecified is invalid.", +"Ensure file at path is present.", +"Ensure file at path is absent.", +"Ensure the contents of the file at path match. If the file does not exist it will be created." +], +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceGroup": { +"description": "Resource groups provide a mechanism to group OS policy resources. Resource groups enable OS policy authors to create a single OS policy to be applied to VMs running different operating systems. When the OS policy is applied to a target VM, the appropriate resource group within the OS policy is selected based on the `OSFilter` specified within the resource group.", +"id": "OSPolicyResourceGroup", +"properties": { +"inventoryFilters": { +"description": "List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters.
For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.", +"items": { +"$ref": "OSPolicyInventoryFilter" +}, +"type": "array" +}, +"resources": { +"description": "Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.", +"items": { +"$ref": "OSPolicyResource" +}, +"type": "array" +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResource": { +"description": "A resource that manages a system package.", +"id": "OSPolicyResourcePackageResource", +"properties": { +"apt": { +"$ref": "OSPolicyResourcePackageResourceAPT", +"description": "A package managed by Apt." +}, +"deb": { +"$ref": "OSPolicyResourcePackageResourceDeb", +"description": "A deb package file." +}, +"desiredState": { +"description": "Required. The desired state the agent should maintain for this package.", +"enum": [ +"DESIRED_STATE_UNSPECIFIED", +"INSTALLED", +"REMOVED" +], +"enumDescriptions": [ +"Unspecified is invalid.", +"Ensure that the package is installed.", +"The agent ensures that the package is not installed and uninstalls it if detected." +], +"type": "string" +}, +"googet": { +"$ref": "OSPolicyResourcePackageResourceGooGet", +"description": "A package managed by GooGet." +}, +"msi": { +"$ref": "OSPolicyResourcePackageResourceMSI", +"description": "An MSI package." +}, +"rpm": { +"$ref": "OSPolicyResourcePackageResourceRPM", +"description": "An rpm package file." +}, +"yum": { +"$ref": "OSPolicyResourcePackageResourceYUM", +"description": "A package managed by YUM." +}, +"zypper": { +"$ref": "OSPolicyResourcePackageResourceZypper", +"description": "A package managed by Zypper." +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceAPT": { +"description": "A package managed by APT. - install: `apt-get update && apt-get -y install [name]` - remove: `apt-get -y remove [name]`", +"id": "OSPolicyResourcePackageResourceAPT", +"properties": { +"name": { +"description": "Required. Package name.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceDeb": { +"description": "A deb package file. dpkg packages only support INSTALLED state.", +"id": "OSPolicyResourcePackageResourceDeb", +"properties": { +"pullDeps": { +"description": "Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`", +"type": "boolean" +}, +"source": { +"$ref": "OSPolicyResourceFile", +"description": "Required. A deb package." +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceGooGet": { +"description": "A package managed by GooGet. - install: `googet -noconfirm install package` - remove: `googet -noconfirm remove package`", +"id": "OSPolicyResourcePackageResourceGooGet", +"properties": { +"name": { +"description": "Required. Package name.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceMSI": { +"description": "An MSI package. MSI packages only support INSTALLED state.", +"id": "OSPolicyResourcePackageResourceMSI", +"properties": { +"properties": { +"description": "Additional properties to use during installation. This should be in the format of Property=Setting. 
Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.", +"items": { +"type": "string" +}, +"type": "array" +}, +"source": { +"$ref": "OSPolicyResourceFile", +"description": "Required. The MSI package." +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceRPM": { +"description": "An RPM package file. RPM packages only support INSTALLED state.", +"id": "OSPolicyResourcePackageResourceRPM", +"properties": { +"pullDeps": { +"description": "Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`", +"type": "boolean" +}, +"source": { +"$ref": "OSPolicyResourceFile", +"description": "Required. An rpm package." +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceYUM": { +"description": "A package managed by YUM. - install: `yum -y install package` - remove: `yum -y remove package`", +"id": "OSPolicyResourcePackageResourceYUM", +"properties": { +"name": { +"description": "Required. Package name.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourcePackageResourceZypper": { +"description": "A package managed by Zypper. - install: `zypper -y install package` - remove: `zypper -y rm package`", +"id": "OSPolicyResourcePackageResourceZypper", +"properties": { +"name": { +"description": "Required. Package name.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceRepositoryResource": { +"description": "A resource that manages a package repository.", +"id": "OSPolicyResourceRepositoryResource", +"properties": { +"apt": { +"$ref": "OSPolicyResourceRepositoryResourceAptRepository", +"description": "An Apt Repository." +}, +"goo": { +"$ref": "OSPolicyResourceRepositoryResourceGooRepository", +"description": "A Goo Repository." +}, +"yum": { +"$ref": "OSPolicyResourceRepositoryResourceYumRepository", +"description": "A Yum Repository." +}, +"zypper": { +"$ref": "OSPolicyResourceRepositoryResourceZypperRepository", +"description": "A Zypper Repository." +} +}, +"type": "object" +}, +"OSPolicyResourceRepositoryResourceAptRepository": { +"description": "Represents a single apt package repository. These will be added to a repo file that will be managed at `/etc/apt/sources.list.d/google_osconfig.list`.", +"id": "OSPolicyResourceRepositoryResourceAptRepository", +"properties": { +"archiveType": { +"description": "Required. Type of archive files in this repository.", +"enum": [ +"ARCHIVE_TYPE_UNSPECIFIED", +"DEB", +"DEB_SRC" +], +"enumDescriptions": [ +"Unspecified is invalid.", +"Deb indicates that the archive contains binary files.", +"Deb-src indicates that the archive contains source files." +], +"type": "string" +}, +"components": { +"description": "Required. List of components for this repository. Must contain at least one item.", +"items": { +"type": "string" +}, +"type": "array" +}, +"distribution": { +"description": "Required. Distribution of this repository.", +"type": "string" +}, +"gpgKey": { +"description": "URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.", +"type": "string" +}, +"uri": { +"description": "Required. URI for this repository.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceRepositoryResourceGooRepository": { +"description": "Represents a Goo package repository. 
These are added to a repo file that is managed at `C:/ProgramData/GooGet/repos/google_osconfig.repo`.", +"id": "OSPolicyResourceRepositoryResourceGooRepository", +"properties": { +"name": { +"description": "Required. The name of the repository.", +"type": "string" +}, +"url": { +"description": "Required. The url of the repository.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceRepositoryResourceYumRepository": { +"description": "Represents a single yum package repository. These are added to a repo file that is managed at `/etc/yum.repos.d/google_osconfig.repo`.", +"id": "OSPolicyResourceRepositoryResourceYumRepository", +"properties": { +"baseUrl": { +"description": "Required. The location of the repository directory.", +"type": "string" +}, +"displayName": { +"description": "The display name of the repository.", +"type": "string" +}, +"gpgKeys": { +"description": "URIs of GPG keys.", +"items": { +"type": "string" +}, +"type": "array" +}, +"id": { +"description": "Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.", +"type": "string" +} +}, +"type": "object" +}, +"OSPolicyResourceRepositoryResourceZypperRepository": { +"description": "Represents a single zypper package repository. These are added to a repo file that is managed at `/etc/zypp/repos.d/google_osconfig.repo`.", +"id": "OSPolicyResourceRepositoryResourceZypperRepository", +"properties": { +"baseUrl": { +"description": "Required. The location of the repository directory.", +"type": "string" +}, +"displayName": { +"description": "The display name of the repository.", +"type": "string" +}, +"gpgKeys": { +"description": "URIs of GPG keys.", +"items": { +"type": "string" +}, +"type": "array" +}, +"id": { +"description": "Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.", +"type": "string" +} +}, +"type": "object" +}, +"Operation": { +"description": "This resource represents a long-running operation that is the result of a network API call.", +"id": "Operation", +"properties": { +"done": { +"description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", +"type": "boolean" +}, +"error": { +"$ref": "Status", +"description": "The error result of the operation in case of failure or cancellation." +}, +"metadata": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", +"type": "object" +}, +"name": { +"description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", +"type": "string" +}, +"response": { +"additionalProperties": { +"description": "Properties of the object. 
Contains field @type with type URL.", +"type": "any" +}, +"description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", +"type": "object" +} +}, +"type": "object" +}, +"Status": { +"description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", +"id": "Status", +"properties": { +"code": { +"description": "The status code, which should be an enum value of google.rpc.Code.", +"format": "int32", +"type": "integer" +}, +"details": { +"description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", +"items": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", +"type": "string" +} +}, +"type": "object" +} +}, +"servicePath": "", +"title": "OS Config API", +"version": "v2beta", +"version_module": true +} \ No newline at end of file
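For orientation, the sketch below assembles an `OSPolicyAssignment` request body in Python using the field names from the `OSPolicy`, `OSPolicyResource`, `OSPolicyAssignmentInstanceFilter`, and `OSPolicyAssignmentRollout` schemas in this discovery document. The concrete values (`install-nginx`, `nginx`, `3600s`) and the `fixed` key of `FixedOrPercent` are illustrative assumptions, since that schema is not reproduced in this hunk; how such a payload is handed to a v2beta `PolicyOrchestrator` (presumably via its `orchestrated_resource` field) is likewise not shown here.

```python
# Minimal sketch only: keys follow the OSPolicy / OSPolicyAssignment schemas above.
# The "fixed" key of FixedOrPercent and all concrete values are assumptions.
import json

# An OS policy that enforces installation of an APT package (OSPolicy schema).
os_policy = {
    "id": "install-nginx",                  # lowercase letters, numbers, hyphens only
    "mode": "ENFORCEMENT",                  # VALIDATION or ENFORCEMENT
    "resourceGroups": [{
        "resources": [{
            "id": "install-nginx-pkg",
            "pkg": {                        # OSPolicyResourcePackageResource
                "desiredState": "INSTALLED",
                "apt": {"name": "nginx"},   # OSPolicyResourcePackageResourceAPT
            },
        }],
    }],
}

# An OS policy assignment wrapping the policy (OSPolicyAssignment schema).
assignment = {
    "osPolicies": [os_policy],
    "instanceFilter": {"all": True},        # target every VM in scope
    "rollout": {
        "disruptionBudget": {"fixed": 1},   # FixedOrPercent; key name assumed
        "minWaitDuration": "3600s",         # google-duration format
    },
}

print(json.dumps(assignment, indent=2))
```

Per the `OSPolicyAssignment` description above, submitting such a body (or updating its `instance_filter` or `os_policies`) triggers a zonal rollout governed by the `rollout` settings shown.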