From 8c53239ba1ddcbb7691f99db47bce7a8e0b737cd Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 19:09:25 +0500 Subject: [PATCH] feat: [google-cloud-automl] Add support for opt-in debug logging (#13332) BEGIN_COMMIT_OVERRIDE feat: Add support for opt-in debug logging fix: Fix typing issue with gRPC metadata when key ends in -bin chore: Update gapic-generator-python to v1.21.0 docs: Update io.proto to use markdown headings instead of HTML, remove some unused HTML from END_COMMIT_OVERRIDE - [ ] Regenerate this pull request now. fix: Fix typing issue with gRPC metadata when key ends in -bin chore: Update gapic-generator-python to v1.21.0 PiperOrigin-RevId: 705285820 Source-Link: https://github.com/googleapis/googleapis/commit/f9b8b9150f7fcd600b0acaeef91236b1843f5e49 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ca1e0a1e472d6e6f5de883a5cb54724f112ce348 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLWF1dG9tbC8uT3dsQm90LnlhbWwiLCJoIjoiY2ExZTBhMWU0NzJkNmU2ZjVkZTg4M2E1Y2I1NDcyNGYxMTJjZTM0OCJ9 BEGIN_NESTED_COMMIT docs: [google-cloud-automl] Update io.proto to use markdown headings instead of HTML, remove some unused HTML from markdown PiperOrigin-RevId: 703192272 Source-Link: https://github.com/googleapis/googleapis/commit/05347e0a68ac16ec35146249d598fcdc6dd8c0c5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/71b95495d6fb29d970ead6a17752d58d1efc8ffb Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLWF1dG9tbC8uT3dsQm90LnlhbWwiLCJoIjoiNzFiOTU0OTVkNmZiMjlkOTcwZWFkNmExNzc1MmQ1OGQxZWZjOGZmYiJ9 END_NESTED_COMMIT --------- Co-authored-by: Owl Bot Co-authored-by: ohmayr --- .../google/cloud/automl/gapic_version.py | 2 +- .../google/cloud/automl_v1/gapic_version.py | 2 +- .../services/auto_ml/async_client.py | 176 +- .../automl_v1/services/auto_ml/client.py | 181 +- .../automl_v1/services/auto_ml/pagers.py | 48 +- .../services/auto_ml/transports/grpc.py | 130 +- 
.../auto_ml/transports/grpc_asyncio.py | 127 +- .../services/auto_ml/transports/rest.py | 1166 ++++++++++++- .../prediction_service/async_client.py | 48 +- .../services/prediction_service/client.py | 53 +- .../prediction_service/transports/grpc.py | 98 +- .../transports/grpc_asyncio.py | 95 +- .../prediction_service/transports/rest.py | 140 +- .../google/cloud/automl_v1/types/io.py | 149 +- .../cloud/automl_v1beta1/gapic_version.py | 2 +- .../services/auto_ml/async_client.py | 224 ++- .../automl_v1beta1/services/auto_ml/client.py | 229 ++- .../automl_v1beta1/services/auto_ml/pagers.py | 80 +- .../services/auto_ml/transports/grpc.py | 142 +- .../auto_ml/transports/grpc_asyncio.py | 139 +- .../services/auto_ml/transports/rest.py | 1544 +++++++++++++++-- .../prediction_service/async_client.py | 48 +- .../services/prediction_service/client.py | 53 +- .../prediction_service/transports/grpc.py | 98 +- .../transports/grpc_asyncio.py | 95 +- .../prediction_service/transports/rest.py | 140 +- ...ippet_metadata_google.cloud.automl.v1.json | 82 +- ..._metadata_google.cloud.automl.v1beta1.json | 106 +- .../unit/gapic/automl_v1/test_auto_ml.py | 90 + .../automl_v1/test_prediction_service.py | 10 + .../unit/gapic/automl_v1beta1/test_auto_ml.py | 120 ++ .../automl_v1beta1/test_prediction_service.py | 10 + 32 files changed, 4757 insertions(+), 870 deletions(-) diff --git a/packages/google-cloud-automl/google/cloud/automl/gapic_version.py b/packages/google-cloud-automl/google/cloud/automl/gapic_version.py index 2523dfbe9e23..558c8aab67c5 100644 --- a/packages/google-cloud-automl/google/cloud/automl/gapic_version.py +++ b/packages/google-cloud-automl/google/cloud/automl/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/gapic_version.py b/packages/google-cloud-automl/google/cloud/automl_v1/gapic_version.py index 2523dfbe9e23..558c8aab67c5 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/gapic_version.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py index 84852b458c9f..324a8847294a 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py @@ -14,6 +14,7 @@ # limitations under the License. # from collections import OrderedDict +import logging as std_logging import re from typing import ( Callable, @@ -69,6 +70,15 @@ from .transports.base import DEFAULT_CLIENT_INFO, AutoMlTransport from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class AutoMlAsyncClient: """AutoML Server API. 
@@ -285,6 +295,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1.AutoMlAsyncClient`.", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1.AutoMl", + "credentialsType": None, + }, + ) + async def create_dataset( self, request: Optional[Union[service.CreateDatasetRequest, dict]] = None, @@ -293,7 +325,7 @@ async def create_dataset( dataset: Optional[gca_dataset.Dataset] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a dataset. @@ -351,8 +383,10 @@ async def sample_create_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -425,7 +459,7 @@ async def get_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> dataset.Dataset: r"""Gets a dataset. @@ -469,8 +503,10 @@ async def sample_get_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.Dataset: @@ -533,7 +569,7 @@ async def list_datasets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatasetsAsyncPager: r"""Lists datasets in a project. @@ -578,8 +614,10 @@ async def sample_list_datasets(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager: @@ -655,7 +693,7 @@ async def update_dataset( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Updates a dataset. @@ -710,8 +748,10 @@ async def sample_update_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.Dataset: @@ -778,7 +818,7 @@ async def delete_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a dataset and all of its contents. Returns empty response in the @@ -830,8 +870,10 @@ async def sample_delete_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -911,7 +953,7 @@ async def import_data( input_config: Optional[io.InputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Imports data into a dataset. For Tables this method can only be called on an empty Dataset. @@ -981,8 +1023,10 @@ async def sample_import_data(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1064,7 +1108,7 @@ async def export_data( output_config: Optional[io.OutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Exports dataset's data to the provided output location. Returns an empty response in the @@ -1126,8 +1170,10 @@ async def sample_export_data(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1208,7 +1254,7 @@ async def get_annotation_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> annotation_spec.AnnotationSpec: r"""Gets an annotation spec. @@ -1252,8 +1298,10 @@ async def sample_get_annotation_spec(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.AnnotationSpec: @@ -1313,7 +1361,7 @@ async def create_model( model: Optional[gca_model.Model] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a model. 
Returns a Model in the [response][google.longrunning.Operation.response] field when it @@ -1371,8 +1419,10 @@ async def sample_create_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1446,7 +1496,7 @@ async def get_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model.Model: r"""Gets a model. @@ -1488,8 +1538,10 @@ async def sample_get_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.types.Model: @@ -1550,7 +1602,7 @@ async def list_models( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelsAsyncPager: r"""Lists models. @@ -1595,8 +1647,10 @@ async def sample_list_models(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager: @@ -1671,7 +1725,7 @@ async def delete_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a model. Returns ``google.protobuf.Empty`` in the [response][google.longrunning.Operation.response] field when it @@ -1722,8 +1776,10 @@ async def sample_delete_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1803,7 +1859,7 @@ async def update_model( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_model.Model: r"""Updates a model. @@ -1853,8 +1909,10 @@ async def sample_update_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.Model: @@ -1919,7 +1977,7 @@ async def deploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Deploys a model. If a model is already deployed, deploying it with the same parameters has no effect. Deploying with different @@ -1980,8 +2038,10 @@ async def sample_deploy_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2060,7 +2120,7 @@ async def undeploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Undeploys a model. If the model is not deployed this method has no effect. @@ -2116,8 +2176,10 @@ async def sample_undeploy_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2197,7 +2259,7 @@ async def export_model( output_config: Optional[io.ModelExportOutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Exports a trained, "export-able", model to a user specified Google Cloud Storage location. 
A model is considered export-able @@ -2265,8 +2327,10 @@ async def sample_export_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2347,7 +2411,7 @@ async def get_model_evaluation( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model_evaluation.ModelEvaluation: r"""Gets a model evaluation. @@ -2391,8 +2455,10 @@ async def sample_get_model_evaluation(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.types.ModelEvaluation: @@ -2452,7 +2518,7 @@ async def list_model_evaluations( filter: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelEvaluationsAsyncPager: r"""Lists model evaluations. @@ -2519,8 +2585,10 @@ async def sample_list_model_evaluations(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/client.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/client.py index 187d7c17a984..0883ef0ea2e5 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/client.py @@ -14,6 +14,7 @@ # limitations under the License. 
# from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -48,6 +49,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.protobuf import empty_pb2 # type: ignore @@ -681,6 +691,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -743,6 +757,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1.AutoMlClient`.", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1.AutoMl", + "credentialsType": None, + }, + ) + def create_dataset( self, request: Optional[Union[service.CreateDatasetRequest, dict]] = None, @@ -751,7 +788,7 @@ def create_dataset( dataset: Optional[gca_dataset.Dataset] = None, retry: 
OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a dataset. @@ -809,8 +846,10 @@ def sample_create_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -880,7 +919,7 @@ def get_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> dataset.Dataset: r"""Gets a dataset. @@ -924,8 +963,10 @@ def sample_get_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.types.Dataset: @@ -985,7 +1026,7 @@ def list_datasets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatasetsPager: r"""Lists datasets in a project. @@ -1030,8 +1071,10 @@ def sample_list_datasets(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager: @@ -1104,7 +1147,7 @@ def update_dataset( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Updates a dataset. @@ -1159,8 +1202,10 @@ def sample_update_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.types.Dataset: @@ -1224,7 +1269,7 @@ def delete_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Deletes a dataset and all of its contents. Returns empty response in the @@ -1276,8 +1321,10 @@ def sample_delete_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1354,7 +1401,7 @@ def import_data( input_config: Optional[io.InputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Imports data into a dataset. For Tables this method can only be called on an empty Dataset. @@ -1424,8 +1471,10 @@ def sample_import_data(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1504,7 +1553,7 @@ def export_data( output_config: Optional[io.OutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Exports dataset's data to the provided output location. Returns an empty response in the @@ -1566,8 +1615,10 @@ def sample_export_data(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1645,7 +1696,7 @@ def get_annotation_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> annotation_spec.AnnotationSpec: r"""Gets an annotation spec. @@ -1689,8 +1740,10 @@ def sample_get_annotation_spec(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.AnnotationSpec: @@ -1747,7 +1800,7 @@ def create_model( model: Optional[gca_model.Model] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a model. Returns a Model in the [response][google.longrunning.Operation.response] field when it @@ -1805,8 +1858,10 @@ def sample_create_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1877,7 +1932,7 @@ def get_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model.Model: r"""Gets a model. @@ -1919,8 +1974,10 @@ def sample_get_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.Model: @@ -1978,7 +2035,7 @@ def list_models( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelsPager: r"""Lists models. @@ -2023,8 +2080,10 @@ def sample_list_models(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager: @@ -2096,7 +2155,7 @@ def delete_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Deletes a model. Returns ``google.protobuf.Empty`` in the [response][google.longrunning.Operation.response] field when it @@ -2147,8 +2206,10 @@ def sample_delete_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2225,7 +2286,7 @@ def update_model( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_model.Model: r"""Updates a model. @@ -2275,8 +2336,10 @@ def sample_update_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.Model: @@ -2338,7 +2401,7 @@ def deploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Deploys a model. If a model is already deployed, deploying it with the same parameters has no effect. Deploying with different @@ -2399,8 +2462,10 @@ def sample_deploy_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2476,7 +2541,7 @@ def undeploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Undeploys a model. If the model is not deployed this method has no effect. @@ -2532,8 +2597,10 @@ def sample_undeploy_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2610,7 +2677,7 @@ def export_model( output_config: Optional[io.ModelExportOutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Exports a trained, "export-able", model to a user specified Google Cloud Storage location. 
A model is considered export-able @@ -2678,8 +2745,10 @@ def sample_export_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2757,7 +2826,7 @@ def get_model_evaluation( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model_evaluation.ModelEvaluation: r"""Gets a model evaluation. @@ -2801,8 +2870,10 @@ def sample_get_model_evaluation(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.types.ModelEvaluation: @@ -2859,7 +2930,7 @@ def list_model_evaluations( filter: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelEvaluationsPager: r"""Lists model evaluations. @@ -2926,8 +2997,10 @@ def sample_list_model_evaluations(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/pagers.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/pagers.py index 1f3b1628ff8d..e03963510a25 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/pagers.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/pagers.py @@ -67,7 +67,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -81,8 +81,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListDatasetsRequest(request) @@ -141,7 +143,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -155,8 +157,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListDatasetsRequest(request) @@ -219,7 +223,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -233,8 +237,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelsRequest(request) @@ -293,7 +299,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -307,8 +313,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelsRequest(request) @@ -371,7 +379,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -385,8 +393,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelEvaluationsRequest(request) @@ -445,7 +455,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -459,8 +469,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelEvaluationsRequest(request) diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc.py index 0715c847081b..b95bc3906f7e 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import json +import logging as std_logging +import pickle from typing import Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -21,7 +24,10 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.automl_v1.types import annotation_spec from google.cloud.automl_v1.types import dataset @@ -32,6 +38,81 @@ from .base import DEFAULT_CLIENT_INFO, AutoMlTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": client_call_details.method, + "request": 
grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class AutoMlGrpcTransport(AutoMlTransport): """gRPC backend transport for AutoMl. @@ -200,7 +281,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -264,7 +350,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. 
return self._operations_client @@ -288,7 +376,7 @@ def create_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_dataset" not in self._stubs: - self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + self._stubs["create_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/CreateDataset", request_serializer=service.CreateDatasetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -312,7 +400,7 @@ def get_dataset(self) -> Callable[[service.GetDatasetRequest], dataset.Dataset]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_dataset" not in self._stubs: - self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + self._stubs["get_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetDataset", request_serializer=service.GetDatasetRequest.serialize, response_deserializer=dataset.Dataset.deserialize, @@ -338,7 +426,7 @@ def list_datasets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_datasets" not in self._stubs: - self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + self._stubs["list_datasets"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ListDatasets", request_serializer=service.ListDatasetsRequest.serialize, response_deserializer=service.ListDatasetsResponse.deserialize, @@ -364,7 +452,7 @@ def update_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_dataset" not in self._stubs: - self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + self._stubs["update_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/UpdateDataset", request_serializer=service.UpdateDatasetRequest.serialize, response_deserializer=gca_dataset.Dataset.deserialize, @@ -394,7 +482,7 @@ def delete_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_dataset" not in self._stubs: - self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + self._stubs["delete_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/DeleteDataset", request_serializer=service.DeleteDatasetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -429,7 +517,7 @@ def import_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "import_data" not in self._stubs: - self._stubs["import_data"] = self.grpc_channel.unary_unary( + self._stubs["import_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ImportData", request_serializer=service.ImportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -458,7 +546,7 @@ def export_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_data" not in self._stubs: - self._stubs["export_data"] = self.grpc_channel.unary_unary( + self._stubs["export_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ExportData", request_serializer=service.ExportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -484,7 +572,7 @@ def get_annotation_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_annotation_spec" not in self._stubs: - self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_annotation_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec", request_serializer=service.GetAnnotationSpecRequest.serialize, response_deserializer=annotation_spec.AnnotationSpec.deserialize, @@ -514,7 +602,7 @@ def create_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_model" not in self._stubs: - self._stubs["create_model"] = self.grpc_channel.unary_unary( + self._stubs["create_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/CreateModel", request_serializer=service.CreateModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -538,7 +626,7 @@ def get_model(self) -> Callable[[service.GetModelRequest], model.Model]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model" not in self._stubs: - self._stubs["get_model"] = self.grpc_channel.unary_unary( + self._stubs["get_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetModel", request_serializer=service.GetModelRequest.serialize, response_deserializer=model.Model.deserialize, @@ -564,7 +652,7 @@ def list_models( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_models" not in self._stubs: - self._stubs["list_models"] = self.grpc_channel.unary_unary( + self._stubs["list_models"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ListModels", request_serializer=service.ListModelsRequest.serialize, response_deserializer=service.ListModelsResponse.deserialize, @@ -593,7 +681,7 @@ def delete_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_model" not in self._stubs: - self._stubs["delete_model"] = self.grpc_channel.unary_unary( + self._stubs["delete_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/DeleteModel", request_serializer=service.DeleteModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -617,7 +705,7 @@ def update_model(self) -> Callable[[service.UpdateModelRequest], gca_model.Model # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_model" not in self._stubs: - self._stubs["update_model"] = self.grpc_channel.unary_unary( + self._stubs["update_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/UpdateModel", request_serializer=service.UpdateModelRequest.serialize, response_deserializer=gca_model.Model.deserialize, @@ -656,7 +744,7 @@ def deploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "deploy_model" not in self._stubs: - self._stubs["deploy_model"] = self.grpc_channel.unary_unary( + self._stubs["deploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/DeployModel", request_serializer=service.DeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -690,7 +778,7 @@ def undeploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undeploy_model" not in self._stubs: - self._stubs["undeploy_model"] = self.grpc_channel.unary_unary( + self._stubs["undeploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/UndeployModel", request_serializer=service.UndeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -723,7 +811,7 @@ def export_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "export_model" not in self._stubs: - self._stubs["export_model"] = self.grpc_channel.unary_unary( + self._stubs["export_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ExportModel", request_serializer=service.ExportModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -751,7 +839,7 @@ def get_model_evaluation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model_evaluation" not in self._stubs: - self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + self._stubs["get_model_evaluation"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetModelEvaluation", request_serializer=service.GetModelEvaluationRequest.serialize, response_deserializer=model_evaluation.ModelEvaluation.deserialize, @@ -779,7 +867,7 @@ def list_model_evaluations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_model_evaluations" not in self._stubs: - self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + self._stubs["list_model_evaluations"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ListModelEvaluations", request_serializer=service.ListModelEvaluationsRequest.serialize, response_deserializer=service.ListModelEvaluationsResponse.deserialize, @@ -787,7 +875,7 @@ def list_model_evaluations( return self._stubs["list_model_evaluations"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py index f9172d736fb3..68562bb5abd5 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. 
# import inspect +import json +import logging as std_logging +import pickle from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -23,8 +26,11 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore from grpc.experimental import aio # type: ignore +import proto # type: ignore from google.cloud.automl_v1.types import annotation_spec from google.cloud.automl_v1.types import dataset @@ -36,6 +42,82 @@ from .base import DEFAULT_CLIENT_INFO, AutoMlTransport from .grpc import AutoMlGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for 
{client_call_details.method}", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class AutoMlGrpcAsyncIOTransport(AutoMlTransport): """gRPC AsyncIO backend transport for AutoMl. @@ -247,10 +329,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -273,7 +358,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. 
if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -298,7 +383,7 @@ def create_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_dataset" not in self._stubs: - self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + self._stubs["create_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/CreateDataset", request_serializer=service.CreateDatasetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -324,7 +409,7 @@ def get_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_dataset" not in self._stubs: - self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + self._stubs["get_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetDataset", request_serializer=service.GetDatasetRequest.serialize, response_deserializer=dataset.Dataset.deserialize, @@ -352,7 +437,7 @@ def list_datasets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_datasets" not in self._stubs: - self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + self._stubs["list_datasets"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ListDatasets", request_serializer=service.ListDatasetsRequest.serialize, response_deserializer=service.ListDatasetsResponse.deserialize, @@ -378,7 +463,7 @@ def update_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_dataset" not in self._stubs: - self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + self._stubs["update_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/UpdateDataset", request_serializer=service.UpdateDatasetRequest.serialize, response_deserializer=gca_dataset.Dataset.deserialize, @@ -408,7 +493,7 @@ def delete_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_dataset" not in self._stubs: - self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + self._stubs["delete_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/DeleteDataset", request_serializer=service.DeleteDatasetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -443,7 +528,7 @@ def import_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "import_data" not in self._stubs: - self._stubs["import_data"] = self.grpc_channel.unary_unary( + self._stubs["import_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ImportData", request_serializer=service.ImportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -472,7 +557,7 @@ def export_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_data" not in self._stubs: - self._stubs["export_data"] = self.grpc_channel.unary_unary( + self._stubs["export_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ExportData", request_serializer=service.ExportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -500,7 +585,7 @@ def get_annotation_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_annotation_spec" not in self._stubs: - self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_annotation_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec", request_serializer=service.GetAnnotationSpecRequest.serialize, response_deserializer=annotation_spec.AnnotationSpec.deserialize, @@ -530,7 +615,7 @@ def create_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_model" not in self._stubs: - self._stubs["create_model"] = self.grpc_channel.unary_unary( + self._stubs["create_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/CreateModel", request_serializer=service.CreateModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -554,7 +639,7 @@ def get_model(self) -> Callable[[service.GetModelRequest], Awaitable[model.Model # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model" not in self._stubs: - self._stubs["get_model"] = self.grpc_channel.unary_unary( + self._stubs["get_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetModel", request_serializer=service.GetModelRequest.serialize, response_deserializer=model.Model.deserialize, @@ -580,7 +665,7 @@ def list_models( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_models" not in self._stubs: - self._stubs["list_models"] = self.grpc_channel.unary_unary( + self._stubs["list_models"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ListModels", request_serializer=service.ListModelsRequest.serialize, response_deserializer=service.ListModelsResponse.deserialize, @@ -609,7 +694,7 @@ def delete_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_model" not in self._stubs: - self._stubs["delete_model"] = self.grpc_channel.unary_unary( + self._stubs["delete_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/DeleteModel", request_serializer=service.DeleteModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -635,7 +720,7 @@ def update_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_model" not in self._stubs: - self._stubs["update_model"] = self.grpc_channel.unary_unary( + self._stubs["update_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/UpdateModel", request_serializer=service.UpdateModelRequest.serialize, response_deserializer=gca_model.Model.deserialize, @@ -674,7 +759,7 @@ def deploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "deploy_model" not in self._stubs: - self._stubs["deploy_model"] = self.grpc_channel.unary_unary( + self._stubs["deploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/DeployModel", request_serializer=service.DeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -708,7 +793,7 @@ def undeploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undeploy_model" not in self._stubs: - self._stubs["undeploy_model"] = self.grpc_channel.unary_unary( + self._stubs["undeploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/UndeployModel", request_serializer=service.UndeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -741,7 +826,7 @@ def export_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "export_model" not in self._stubs: - self._stubs["export_model"] = self.grpc_channel.unary_unary( + self._stubs["export_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ExportModel", request_serializer=service.ExportModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -769,7 +854,7 @@ def get_model_evaluation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model_evaluation" not in self._stubs: - self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + self._stubs["get_model_evaluation"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/GetModelEvaluation", request_serializer=service.GetModelEvaluationRequest.serialize, response_deserializer=model_evaluation.ModelEvaluation.deserialize, @@ -798,7 +883,7 @@ def list_model_evaluations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_model_evaluations" not in self._stubs: - self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + self._stubs["list_model_evaluations"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.AutoMl/ListModelEvaluations", request_serializer=service.ListModelEvaluationsRequest.serialize, response_deserializer=service.ListModelEvaluationsResponse.deserialize, @@ -996,7 +1081,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/rest.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/rest.py index bf07aec84ac8..e94c4df2a09e 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/rest.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/transports/rest.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - import dataclasses import json # type: ignore +import logging from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings @@ -43,6 +43,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -217,8 +225,10 @@ def post_update_model(self, response): """ def pre_create_dataset( - self, request: service.CreateDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.CreateDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for create_dataset Override in a subclass to manipulate the request or metadata @@ -238,8 +248,10 @@ def post_create_dataset( return response def pre_create_model( - self, request: service.CreateModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.CreateModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for create_model Override in a subclass to manipulate the request or metadata @@ -259,8 +271,10 @@ def post_create_model( return response def pre_delete_dataset( - self, request: service.DeleteDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.DeleteDatasetRequest, + metadata: 
Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for delete_dataset Override in a subclass to manipulate the request or metadata @@ -280,8 +294,10 @@ def post_delete_dataset( return response def pre_delete_model( - self, request: service.DeleteModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.DeleteModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for delete_model Override in a subclass to manipulate the request or metadata @@ -301,8 +317,10 @@ def post_delete_model( return response def pre_deploy_model( - self, request: service.DeployModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.DeployModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for deploy_model Override in a subclass to manipulate the request or metadata @@ -322,8 +340,10 @@ def post_deploy_model( return response def pre_export_data( - self, request: service.ExportDataRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ExportDataRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for export_data Override in a subclass to manipulate the request or metadata @@ -343,8 +363,10 @@ def post_export_data( return response def pre_export_model( - self, request: service.ExportModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, 
str]]]: + self, + request: service.ExportModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for export_model Override in a subclass to manipulate the request or metadata @@ -366,8 +388,10 @@ def post_export_model( def pre_get_annotation_spec( self, request: service.GetAnnotationSpecRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.GetAnnotationSpecRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.GetAnnotationSpecRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_annotation_spec Override in a subclass to manipulate the request or metadata @@ -387,8 +411,10 @@ def post_get_annotation_spec( return response def pre_get_dataset( - self, request: service.GetDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.GetDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_dataset Override in a subclass to manipulate the request or metadata @@ -406,8 +432,10 @@ def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: return response def pre_get_model( - self, request: service.GetModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.GetModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_model Override in a subclass to manipulate the request or metadata @@ -427,8 +455,10 @@ def post_get_model(self, response: model.Model) -> model.Model: def pre_get_model_evaluation( self, request: 
service.GetModelEvaluationRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.GetModelEvaluationRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.GetModelEvaluationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_model_evaluation Override in a subclass to manipulate the request or metadata @@ -448,8 +478,10 @@ def post_get_model_evaluation( return response def pre_import_data( - self, request: service.ImportDataRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ImportDataRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for import_data Override in a subclass to manipulate the request or metadata @@ -469,8 +501,10 @@ def post_import_data( return response def pre_list_datasets( - self, request: service.ListDatasetsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ListDatasetsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_datasets Override in a subclass to manipulate the request or metadata @@ -492,8 +526,10 @@ def post_list_datasets( def pre_list_model_evaluations( self, request: service.ListModelEvaluationsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.ListModelEvaluationsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.ListModelEvaluationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_model_evaluations Override in a subclass to manipulate the request or metadata @@ -513,8 +549,10 @@ def 
post_list_model_evaluations( return response def pre_list_models( - self, request: service.ListModelsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ListModelsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_models Override in a subclass to manipulate the request or metadata @@ -534,8 +572,10 @@ def post_list_models( return response def pre_undeploy_model( - self, request: service.UndeployModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.UndeployModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for undeploy_model Override in a subclass to manipulate the request or metadata @@ -555,8 +595,10 @@ def post_undeploy_model( return response def pre_update_dataset( - self, request: service.UpdateDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.UpdateDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_dataset Override in a subclass to manipulate the request or metadata @@ -574,8 +616,10 @@ def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Data return response def pre_update_model( - self, request: service.UpdateModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.UpdateModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.UpdateModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.UpdateModelRequest, 
Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_model Override in a subclass to manipulate the request or metadata @@ -787,7 +831,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create dataset method over HTTP. @@ -798,8 +842,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -812,6 +858,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseCreateDataset._get_http_options() ) + request, metadata = self._interceptor.pre_create_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseCreateDataset._get_transcoded_request( @@ -830,6 +877,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.CreateDataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": 
"CreateDataset", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._CreateDataset._get_response( self._host, @@ -849,7 +923,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.create_dataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "CreateDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateModel(_BaseAutoMlRestTransport._BaseCreateModel, AutoMlRestStub): @@ -885,7 +981,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create model method over HTTP. @@ -896,8 +992,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -908,6 +1006,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseCreateModel._get_http_options() + request, metadata = self._interceptor.pre_create_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseCreateModel._get_transcoded_request( @@ -926,6 +1025,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.CreateModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "CreateModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._CreateModel._get_response( self._host, @@ -945,7 +1071,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.create_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "CreateModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_DeleteDataset(_BaseAutoMlRestTransport._BaseDeleteDataset, AutoMlRestStub): @@ -980,7 +1128,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the delete dataset method over HTTP. @@ -991,8 +1139,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1005,6 +1155,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseDeleteDataset._get_http_options() ) + request, metadata = self._interceptor.pre_delete_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseDeleteDataset._get_transcoded_request( @@ -1019,6 +1170,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.DeleteDataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "DeleteDataset", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._DeleteDataset._get_response( self._host, @@ -1037,7 +1215,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.delete_dataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "DeleteDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteModel(_BaseAutoMlRestTransport._BaseDeleteModel, AutoMlRestStub): @@ -1072,7 +1272,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the delete model method over HTTP. @@ -1083,8 +1283,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1095,6 +1297,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseDeleteModel._get_http_options() + request, metadata = self._interceptor.pre_delete_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseDeleteModel._get_transcoded_request( @@ -1109,6 +1312,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.DeleteModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "DeleteModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._DeleteModel._get_response( self._host, @@ -1127,7 +1357,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.delete_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "DeleteModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_DeployModel(_BaseAutoMlRestTransport._BaseDeployModel, AutoMlRestStub): @@ -1163,7 +1415,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the deploy model method over HTTP. @@ -1174,8 +1426,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1186,6 +1440,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseDeployModel._get_http_options() + request, metadata = self._interceptor.pre_deploy_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseDeployModel._get_transcoded_request( @@ -1204,6 +1459,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.DeployModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "DeployModel", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._DeployModel._get_response( self._host, @@ -1223,7 +1505,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_deploy_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.deploy_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "DeployModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExportData(_BaseAutoMlRestTransport._BaseExportData, AutoMlRestStub): @@ -1259,7 +1563,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the export data method over HTTP. @@ -1270,8 +1574,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1282,6 +1588,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseExportData._get_http_options() + request, metadata = self._interceptor.pre_export_data(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseExportData._get_transcoded_request( @@ -1300,6 +1607,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.ExportData", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ExportData", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ExportData._get_response( self._host, @@ -1319,7 +1653,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_data(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.export_data", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ExportData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_ExportModel(_BaseAutoMlRestTransport._BaseExportModel, AutoMlRestStub): @@ -1355,7 +1711,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the export model method over HTTP. @@ -1368,8 +1724,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1380,6 +1738,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseExportModel._get_http_options() + request, metadata = self._interceptor.pre_export_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseExportModel._get_transcoded_request( @@ -1398,6 +1757,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.ExportModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ExportModel", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ExportModel._get_response( self._host, @@ -1417,7 +1803,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.export_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ExportModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetAnnotationSpec( @@ -1454,7 +1862,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> annotation_spec.AnnotationSpec: r"""Call the get annotation spec method over HTTP. @@ -1465,8 +1873,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.annotation_spec.AnnotationSpec: @@ -1476,6 +1886,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseGetAnnotationSpec._get_http_options() ) + request, metadata = self._interceptor.pre_get_annotation_spec( request, metadata ) @@ -1492,6 +1903,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.GetAnnotationSpec", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetAnnotationSpec", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetAnnotationSpec._get_response( self._host, @@ -1512,7 +1950,29 @@ def __call__( pb_resp = annotation_spec.AnnotationSpec.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_annotation_spec(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = annotation_spec.AnnotationSpec.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.get_annotation_spec", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetAnnotationSpec", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_GetDataset(_BaseAutoMlRestTransport._BaseGetDataset, AutoMlRestStub): @@ -1547,7 +2007,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> dataset.Dataset: r"""Call the get dataset method over HTTP. @@ -1558,8 +2018,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.dataset.Dataset: @@ -1571,6 +2033,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseGetDataset._get_http_options() + request, metadata = self._interceptor.pre_get_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseGetDataset._get_transcoded_request( @@ -1585,6 +2048,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.GetDataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetDataset", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the 
request response = AutoMlRestTransport._GetDataset._get_response( self._host, @@ -1605,7 +2095,29 @@ def __call__( pb_resp = dataset.Dataset.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = dataset.Dataset.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.get_dataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetModel(_BaseAutoMlRestTransport._BaseGetModel, AutoMlRestStub): @@ -1640,7 +2152,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model.Model: r"""Call the get model method over HTTP. @@ -1651,8 +2163,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.model.Model: @@ -1662,6 +2176,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseGetModel._get_http_options() + request, metadata = self._interceptor.pre_get_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseGetModel._get_transcoded_request( @@ -1676,6 +2191,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.GetModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetModel._get_response( self._host, @@ -1696,7 +2238,29 @@ def __call__( pb_resp = model.Model.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = model.Model.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.get_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetModelEvaluation( @@ -1733,7 +2297,7 @@ def __call__( *, retry: OptionalRetry 
= gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model_evaluation.ModelEvaluation: r"""Call the get model evaluation method over HTTP. @@ -1744,8 +2308,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.model_evaluation.ModelEvaluation: @@ -1755,6 +2321,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseGetModelEvaluation._get_http_options() ) + request, metadata = self._interceptor.pre_get_model_evaluation( request, metadata ) @@ -1769,6 +2336,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.GetModelEvaluation", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetModelEvaluation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetModelEvaluation._get_response( self._host, @@ -1789,7 +2383,31 @@ def __call__( pb_resp = 
model_evaluation.ModelEvaluation.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model_evaluation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = model_evaluation.ModelEvaluation.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.get_model_evaluation", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "GetModelEvaluation", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ImportData(_BaseAutoMlRestTransport._BaseImportData, AutoMlRestStub): @@ -1825,7 +2443,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the import data method over HTTP. @@ -1836,8 +2454,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1848,6 +2468,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseImportData._get_http_options() + request, metadata = self._interceptor.pre_import_data(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseImportData._get_transcoded_request( @@ -1866,6 +2487,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.ImportData", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ImportData", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ImportData._get_response( self._host, @@ -1885,7 +2533,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_import_data(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.import_data", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ImportData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_ListDatasets(_BaseAutoMlRestTransport._BaseListDatasets, AutoMlRestStub): @@ -1920,7 +2590,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListDatasetsResponse: r"""Call the list datasets method over HTTP. @@ -1931,8 +2601,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.service.ListDatasetsResponse: @@ -1944,6 +2616,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseListDatasets._get_http_options() ) + request, metadata = self._interceptor.pre_list_datasets(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseListDatasets._get_transcoded_request( @@ -1958,6 +2631,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.ListDatasets", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ListDatasets", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListDatasets._get_response( self._host, @@ -1978,7 +2678,29 @@ def __call__( pb_resp = service.ListDatasetsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_datasets(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListDatasetsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.list_datasets", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ListDatasets", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListModelEvaluations( @@ -2015,7 +2737,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListModelEvaluationsResponse: r"""Call the list model evaluations method over HTTP. @@ -2026,8 +2748,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.service.ListModelEvaluationsResponse: @@ -2039,6 +2763,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseListModelEvaluations._get_http_options() ) + request, metadata = self._interceptor.pre_list_model_evaluations( request, metadata ) @@ -2051,6 +2776,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.ListModelEvaluations", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ListModelEvaluations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListModelEvaluations._get_response( self._host, @@ -2071,7 +2823,31 @@ def __call__( pb_resp = service.ListModelEvaluationsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_model_evaluations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListModelEvaluationsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.list_model_evaluations", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ListModelEvaluations", + "metadata": http_response["headers"], + "httpResponse": 
http_response, + }, + ) return resp class _ListModels(_BaseAutoMlRestTransport._BaseListModels, AutoMlRestStub): @@ -2106,7 +2882,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListModelsResponse: r"""Call the list models method over HTTP. @@ -2117,8 +2893,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.service.ListModelsResponse: @@ -2128,6 +2906,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseListModels._get_http_options() + request, metadata = self._interceptor.pre_list_models(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseListModels._get_transcoded_request( @@ -2142,6 +2921,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.ListModels", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ListModels", + "httpRequest": http_request, 
+ "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListModels._get_response( self._host, @@ -2162,7 +2968,29 @@ def __call__( pb_resp = service.ListModelsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_models(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListModelsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.list_models", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "ListModels", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UndeployModel(_BaseAutoMlRestTransport._BaseUndeployModel, AutoMlRestStub): @@ -2198,7 +3026,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the undeploy model method over HTTP. @@ -2209,8 +3037,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2223,6 +3053,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseUndeployModel._get_http_options() ) + request, metadata = self._interceptor.pre_undeploy_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseUndeployModel._get_transcoded_request( @@ -2241,6 +3072,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.UndeployModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "UndeployModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UndeployModel._get_response( self._host, @@ -2260,7 +3118,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undeploy_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.undeploy_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "UndeployModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp 
class _UpdateDataset(_BaseAutoMlRestTransport._BaseUpdateDataset, AutoMlRestStub): @@ -2296,7 +3176,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Call the update dataset method over HTTP. @@ -2307,8 +3187,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.gca_dataset.Dataset: @@ -2322,6 +3204,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseUpdateDataset._get_http_options() ) + request, metadata = self._interceptor.pre_update_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseUpdateDataset._get_transcoded_request( @@ -2340,6 +3223,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.UpdateDataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "UpdateDataset", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UpdateDataset._get_response( self._host, @@ -2361,7 +3271,29 @@ def __call__( pb_resp = gca_dataset.Dataset.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_dataset.Dataset.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.update_dataset", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "UpdateDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateModel(_BaseAutoMlRestTransport._BaseUpdateModel, AutoMlRestStub): @@ -2397,7 +3329,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_model.Model: r"""Call the update model method over HTTP. @@ -2408,8 +3340,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.gca_model.Model: @@ -2419,6 +3353,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseUpdateModel._get_http_options() + request, metadata = self._interceptor.pre_update_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseUpdateModel._get_transcoded_request( @@ -2437,6 +3372,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.AutoMlClient.UpdateModel", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "UpdateModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UpdateModel._get_response( self._host, @@ -2458,7 +3420,29 @@ def __call__( pb_resp = gca_model.Model.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_model.Model.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.AutoMlClient.update_model", + extra={ + "serviceName": "google.cloud.automl.v1.AutoMl", + "rpcName": "UpdateModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git 
a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py index d82bdf58cd78..424d228937c1 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py @@ -14,6 +14,7 @@ # limitations under the License. # from collections import OrderedDict +import logging as std_logging import re from typing import ( Callable, @@ -57,6 +58,15 @@ from .transports.base import DEFAULT_CLIENT_INFO, PredictionServiceTransport from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class PredictionServiceAsyncClient: """AutoML Prediction API. 
@@ -270,6 +280,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1.PredictionServiceAsyncClient`.", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1.PredictionService", + "credentialsType": None, + }, + ) + async def predict( self, request: Optional[Union[prediction_service.PredictRequest, dict]] = None, @@ -279,7 +311,7 @@ async def predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> prediction_service.PredictResponse: r"""Perform an online prediction. The prediction result is directly returned in the response. Available for following ML scenarios, @@ -406,8 +438,10 @@ async def sample_predict(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1.types.PredictResponse: @@ -474,7 +508,7 @@ async def batch_predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], @@ -653,8 +687,10 @@ async def sample_batch_predict(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/client.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/client.py index ba4d427b59db..0ca9e7b01946 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/client.py @@ -14,6 +14,7 @@ # limitations under the License. 
# from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -48,6 +49,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -594,6 +604,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -660,6 +674,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1.PredictionServiceClient`.", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1.PredictionService", + "credentialsType": None, + }, + ) + def predict( self, request: Optional[Union[prediction_service.PredictRequest, dict]] = None, @@ -669,7 +706,7 @@ def predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = 
gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> prediction_service.PredictResponse: r"""Perform an online prediction. The prediction result is directly returned in the response. Available for following ML scenarios, @@ -796,8 +833,10 @@ def sample_predict(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1.types.PredictResponse: @@ -862,7 +901,7 @@ def batch_predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], @@ -1041,8 +1080,10 @@ def sample_batch_predict(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc.py index 1db17b092455..0ca68c269136 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle from typing import Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -21,12 +24,90 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.automl_v1.types import prediction_service from .base import DEFAULT_CLIENT_INFO, PredictionServiceTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, 
google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class PredictionServiceGrpcTransport(PredictionServiceTransport): """gRPC backend transport for PredictionService. @@ -184,7 +265,12 @@ def __init__( ], ) - # Wrap messages. 
This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -248,7 +334,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -310,7 +398,7 @@ def predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "predict" not in self._stubs: - self._stubs["predict"] = self.grpc_channel.unary_unary( + self._stubs["predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.PredictionService/Predict", request_serializer=prediction_service.PredictRequest.serialize, response_deserializer=prediction_service.PredictResponse.deserialize, @@ -355,7 +443,7 @@ def batch_predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_predict" not in self._stubs: - self._stubs["batch_predict"] = self.grpc_channel.unary_unary( + self._stubs["batch_predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.PredictionService/BatchPredict", request_serializer=prediction_service.BatchPredictRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -363,7 +451,7 @@ def batch_predict( return self._stubs["batch_predict"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py index 7141a3b758f4..64d0a8d8fcad 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. 
# import inspect +import json +import logging as std_logging +import pickle from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -23,14 +26,93 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore from grpc.experimental import aio # type: ignore +import proto # type: ignore from google.cloud.automl_v1.types import prediction_service from .base import DEFAULT_CLIENT_INFO, PredictionServiceTransport from .grpc import PredictionServiceGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": 
"google.cloud.automl.v1.PredictionService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): """gRPC AsyncIO backend transport for PredictionService. @@ -231,10 +313,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -257,7 +342,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. 
if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -321,7 +406,7 @@ def predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "predict" not in self._stubs: - self._stubs["predict"] = self.grpc_channel.unary_unary( + self._stubs["predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.PredictionService/Predict", request_serializer=prediction_service.PredictRequest.serialize, response_deserializer=prediction_service.PredictResponse.deserialize, @@ -368,7 +453,7 @@ def batch_predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_predict" not in self._stubs: - self._stubs["batch_predict"] = self.grpc_channel.unary_unary( + self._stubs["batch_predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1.PredictionService/BatchPredict", request_serializer=prediction_service.BatchPredictRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -396,7 +481,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/rest.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/rest.py index c6b8978f3d62..5b8981acfe35 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/rest.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/transports/rest.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - import dataclasses import json # type: ignore +import logging from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings @@ -38,6 +38,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -86,8 +94,10 @@ def post_predict(self, response): def pre_batch_predict( self, request: prediction_service.BatchPredictRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[prediction_service.BatchPredictRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + prediction_service.BatchPredictRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for batch_predict Override in a subclass to manipulate the request or metadata @@ -109,8 +119,10 @@ def post_batch_predict( def pre_predict( self, request: prediction_service.PredictRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[prediction_service.PredictRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + prediction_service.PredictRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for predict Override in a subclass to manipulate the request or metadata @@ -315,7 +327,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the batch predict method over HTTP. 
@@ -326,8 +338,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -340,6 +354,7 @@ def __call__( http_options = ( _BasePredictionServiceRestTransport._BaseBatchPredict._get_http_options() ) + request, metadata = self._interceptor.pre_batch_predict(request, metadata) transcoded_request = _BasePredictionServiceRestTransport._BaseBatchPredict._get_transcoded_request( http_options, request @@ -354,6 +369,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.PredictionServiceClient.BatchPredict", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": "BatchPredict", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = PredictionServiceRestTransport._BatchPredict._get_response( self._host, @@ -373,7 +415,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = 
self._interceptor.post_batch_predict(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.PredictionServiceClient.batch_predict", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": "BatchPredict", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _Predict( @@ -411,7 +475,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> prediction_service.PredictResponse: r"""Call the predict method over HTTP. @@ -422,8 +486,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.prediction_service.PredictResponse: @@ -435,6 +501,7 @@ def __call__( http_options = ( _BasePredictionServiceRestTransport._BasePredict._get_http_options() ) + request, metadata = self._interceptor.pre_predict(request, metadata) transcoded_request = _BasePredictionServiceRestTransport._BasePredict._get_transcoded_request( http_options, request @@ -453,6 +520,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1.PredictionServiceClient.Predict", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": "Predict", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = PredictionServiceRestTransport._Predict._get_response( self._host, @@ -474,7 +568,31 @@ def __call__( pb_resp = prediction_service.PredictResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_predict(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = prediction_service.PredictResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1.PredictionServiceClient.predict", + extra={ + "serviceName": "google.cloud.automl.v1.PredictionService", + "rpcName": "Predict", + "metadata": 
http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/types/io.py b/packages/google-cloud-automl/google/cloud/automl_v1/types/io.py index 5a9d6762302f..8b14c11e07d4 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1/types/io.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1/types/io.py @@ -55,13 +55,11 @@ class InputConfig(proto.Message): with non-terminal symbols defined near the end of this comment. The formats are: - .. raw:: html - -

AutoML Vision

+ AutoML Vision + ^^^^^^^^^^^^^ - .. raw:: html - -
Classification
+ Classification + '''''''''''''' See `Preparing your training data `__ for @@ -102,12 +100,12 @@ class InputConfig(proto.Message): UNASSIGNED,gs://folder/image3.jpg,daisy UNASSIGNED,gs://folder/image4.jpg - .. raw:: html + Object Detection + '''''''''''''''' -
Object Detection
- See [Preparing your training - data](https://cloud.google.com/vision/automl/object-detection/docs/prepare) - for more information. + See `Preparing your training + data `__ + for more information. A CSV file(s) with each line in format: @@ -153,13 +151,11 @@ class InputConfig(proto.Message):
- .. raw:: html - -

AutoML Video Intelligence

+ AutoML Video Intelligence + ^^^^^^^^^^^^^^^^^^^^^^^^^ - .. raw:: html - -
Classification
+ Classification + '''''''''''''' See `Preparing your training data `__ @@ -209,9 +205,8 @@ class InputConfig(proto.Message): gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - .. raw:: html - -
Object Tracking
+ Object Tracking + ''''''''''''''' See `Preparing your training data `__ @@ -274,18 +269,11 @@ class InputConfig(proto.Message): gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, gs://folder/video2.avi,,,,,,,,,,, - .. raw:: html - -
-
- - .. raw:: html - -

AutoML Natural Language

+ AutoML Natural Language + ^^^^^^^^^^^^^^^^^^^^^^^ - .. raw:: html - -
Entity Extraction
+ Entity Extraction + ''''''''''''''''' See `Preparing your training data `__ for @@ -479,9 +467,8 @@ class InputConfig(proto.Message): }, ], - .. raw:: html - -
Classification
+ Classification + '''''''''''''' See `Preparing your training data `__ @@ -533,9 +520,8 @@ class InputConfig(proto.Message): TEST,gs://folder/document.pdf VALIDATE,gs://folder/text_files.zip,BadFood - .. raw:: html - -
Sentiment Analysis
+ Sentiment Analysis + '''''''''''''''''' See `Preparing your training data `__ @@ -598,15 +584,8 @@ class InputConfig(proto.Message): TEST,gs://folder/document.pdf VALIDATE,gs://folder/text_files.zip,2 - .. raw:: html - -
-
- - .. raw:: html - -

AutoML Tables

+ AutoML Tables + ^^^^^^^^^^^^^ See `Preparing your training data `__ for @@ -647,11 +626,6 @@ class InputConfig(proto.Message): and between 1000 and 100,000,000 rows, inclusive. There are at most 5 import data running in parallel. - .. raw:: html - -
-
- **Input field definitions:** ``ML_USE`` : ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED") @@ -728,9 +702,8 @@ class InputConfig(proto.Message): semantic of the imported data, any string must be up to 25000 characters long. - .. raw:: html - -

AutoML Tables

+ AutoML Tables + ^^^^^^^^^^^^^ ``schema_inference_version`` : (integer) This value must be supplied. The version of the algorithm to use for the @@ -763,10 +736,11 @@ class BatchPredictInputConfig(proto.Message): with non-terminal symbols defined near the end of this comment. The formats are: - .. raw:: html + AutoML Vision + ^^^^^^^^^^^^^ -

AutoML Vision

-
Classification
+ Classification + '''''''''''''' One or more CSV files where each line is a single column: @@ -786,9 +760,8 @@ class BatchPredictInputConfig(proto.Message): gs://folder/image2.gif gs://folder/image3.png - .. raw:: html - -
Object Detection
+ Object Detection + '''''''''''''''' One or more CSV files where each line is a single column: @@ -808,15 +781,11 @@ class BatchPredictInputConfig(proto.Message): gs://folder/image2.gif gs://folder/image3.png - .. raw:: html - -
-
- - .. raw:: html + AutoML Video Intelligence + ^^^^^^^^^^^^^^^^^^^^^^^^^ -

AutoML Video Intelligence

-
Classification
+ Classification + '''''''''''''' One or more CSV files where each line is a single column: @@ -839,9 +808,8 @@ class BatchPredictInputConfig(proto.Message): gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf - .. raw:: html - -
Object Tracking
+ Object Tracking + ''''''''''''''' One or more CSV files where each line is a single column: @@ -864,15 +832,11 @@ class BatchPredictInputConfig(proto.Message): gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf - .. raw:: html - -
-
- - .. raw:: html + AutoML Natural Language + ^^^^^^^^^^^^^^^^^^^^^^^ -

AutoML Natural Language

-
Classification
+ Classification + '''''''''''''' One or more CSV files where each line is a single column: @@ -893,10 +857,10 @@ class BatchPredictInputConfig(proto.Message): gs://folder/text2.pdf gs://folder/text3.tif - .. raw:: html + Sentiment Analysis + '''''''''''''''''' -
Sentiment Analysis
- One or more CSV files where each line is a single column: + One or more CSV files where each line is a single column: :: @@ -915,9 +879,8 @@ class BatchPredictInputConfig(proto.Message): gs://folder/text2.pdf gs://folder/text3.tif - .. raw:: html - -
Entity Extraction
+ Entity Extraction + ''''''''''''''''' One or more JSONL (JSON Lines) files that either provide inline text or documents. You can only use one format, either inline text or @@ -992,15 +955,8 @@ class BatchPredictInputConfig(proto.Message): } } - .. raw:: html - -
-
- - .. raw:: html - -

AutoML Tables

+ AutoML Tables + ^^^^^^^^^^^^^ See `Preparing your training data `__ @@ -1049,11 +1005,6 @@ class BatchPredictInputConfig(proto.Message): column spec's data types. Prediction on all the rows of the table will be attempted. - .. raw:: html - -
-
- **Input field definitions:** ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/gapic_version.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/gapic_version.py index 2523dfbe9e23..558c8aab67c5 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/gapic_version.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py index 665e597dd641..0513f1f38e17 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py @@ -14,6 +14,7 @@ # limitations under the License. # from collections import OrderedDict +import logging as std_logging import re from typing import ( Callable, @@ -78,6 +79,15 @@ from .transports.base import DEFAULT_CLIENT_INFO, AutoMlTransport from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class AutoMlAsyncClient: """AutoML Server API. 
@@ -298,6 +308,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1beta1.AutoMlAsyncClient`.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "credentialsType": None, + }, + ) + async def create_dataset( self, request: Optional[Union[service.CreateDatasetRequest, dict]] = None, @@ -306,7 +338,7 @@ async def create_dataset( dataset: Optional[gca_dataset.Dataset] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Creates a dataset. @@ -360,8 +392,10 @@ async def sample_create_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1beta1.types.Dataset: @@ -426,7 +460,7 @@ async def get_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> dataset.Dataset: r"""Gets a dataset. @@ -470,8 +504,10 @@ async def sample_get_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.Dataset: @@ -534,7 +570,7 @@ async def list_datasets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatasetsAsyncPager: r"""Lists datasets in a project. @@ -579,8 +615,10 @@ async def sample_list_datasets(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager: @@ -655,7 +693,7 @@ async def update_dataset( dataset: Optional[gca_dataset.Dataset] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Updates a dataset. @@ -703,8 +741,10 @@ async def sample_update_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.Dataset: @@ -769,7 +809,7 @@ async def delete_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a dataset and all of its contents. Returns empty response in the @@ -821,8 +861,10 @@ async def sample_delete_dataset(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -902,7 +944,7 @@ async def import_data( input_config: Optional[io.InputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Imports data into a dataset. For Tables this method can only be called on an empty Dataset. @@ -968,8 +1010,10 @@ async def sample_import_data(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1051,7 +1095,7 @@ async def export_data( output_config: Optional[io.OutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Exports dataset's data to the provided output location. Returns an empty response in the @@ -1109,8 +1153,10 @@ async def sample_export_data(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1191,7 +1237,7 @@ async def get_annotation_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> annotation_spec.AnnotationSpec: r"""Gets an annotation spec. @@ -1235,8 +1281,10 @@ async def sample_get_annotation_spec(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.AnnotationSpec: @@ -1295,7 +1343,7 @@ async def get_table_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table_spec.TableSpec: r"""Gets a table spec. @@ -1339,8 +1387,10 @@ async def sample_get_table_spec(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.TableSpec: @@ -1409,7 +1459,7 @@ async def list_table_specs( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTableSpecsAsyncPager: r"""Lists table specs in a dataset. @@ -1454,8 +1504,10 @@ async def sample_list_table_specs(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager: @@ -1530,7 +1582,7 @@ async def update_table_spec( table_spec: Optional[gca_table_spec.TableSpec] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_table_spec.TableSpec: r"""Updates a table spec. 
@@ -1573,8 +1625,10 @@ async def sample_update_table_spec(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.TableSpec: @@ -1645,7 +1699,7 @@ async def get_column_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> column_spec.ColumnSpec: r"""Gets a column spec. @@ -1689,8 +1743,10 @@ async def sample_get_column_spec(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1beta1.types.ColumnSpec: @@ -1751,7 +1807,7 @@ async def list_column_specs( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListColumnSpecsAsyncPager: r"""Lists column specs in a table spec. @@ -1796,8 +1852,10 @@ async def sample_list_column_specs(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager: @@ -1872,7 +1930,7 @@ async def update_column_spec( column_spec: Optional[gca_column_spec.ColumnSpec] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_column_spec.ColumnSpec: r"""Updates a column spec. @@ -1915,8 +1973,10 @@ async def sample_update_column_spec(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.ColumnSpec: @@ -1980,7 +2040,7 @@ async def create_model( model: Optional[gca_model.Model] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a model. Returns a Model in the [response][google.longrunning.Operation.response] field when it @@ -2038,8 +2098,10 @@ async def sample_create_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2113,7 +2175,7 @@ async def get_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model.Model: r"""Gets a model. @@ -2155,8 +2217,10 @@ async def sample_get_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.Model: @@ -2217,7 +2281,7 @@ async def list_models( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelsAsyncPager: r"""Lists models. @@ -2262,8 +2326,10 @@ async def sample_list_models(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager: @@ -2338,7 +2404,7 @@ async def delete_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a model. Returns ``google.protobuf.Empty`` in the [response][google.longrunning.Operation.response] field when it @@ -2389,8 +2455,10 @@ async def sample_delete_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2469,7 +2537,7 @@ async def deploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Deploys a model. If a model is already deployed, deploying it with the same parameters has no effect. Deploying with different @@ -2531,8 +2599,10 @@ async def sample_deploy_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2611,7 +2681,7 @@ async def undeploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Undeploys a model. If the model is not deployed this method has no effect. 
@@ -2667,8 +2737,10 @@ async def sample_undeploy_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2748,7 +2820,7 @@ async def export_model( output_config: Optional[io.ModelExportOutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Exports a trained, "export-able", model to a user specified Google Cloud Storage location. A model is considered export-able @@ -2813,8 +2885,10 @@ async def sample_export_model(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -2896,7 +2970,7 @@ async def export_evaluated_examples( output_config: Optional[io.ExportEvaluatedExamplesOutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Exports examples on which the model was evaluated (i.e. which were in the TEST set of the dataset the model was created from), @@ -2966,8 +3040,10 @@ async def sample_export_evaluated_examples(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -3048,7 +3124,7 @@ async def get_model_evaluation( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model_evaluation.ModelEvaluation: r"""Gets a model evaluation. @@ -3092,8 +3168,10 @@ async def sample_get_model_evaluation(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.ModelEvaluation: @@ -3152,7 +3230,7 @@ async def list_model_evaluations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelEvaluationsAsyncPager: r"""Lists model evaluations. @@ -3200,8 +3278,10 @@ async def sample_list_model_evaluations(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/client.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/client.py index 06f15fd1ec6f..779ea8ffc620 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/client.py @@ -14,6 +14,7 @@ # limitations under the License. 
# from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -48,6 +49,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.protobuf import empty_pb2 # type: ignore @@ -740,6 +750,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -802,6 +816,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1beta1.AutoMlClient`.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "credentialsType": None, + }, + ) + def create_dataset( self, request: Optional[Union[service.CreateDatasetRequest, dict]] = None, @@ -810,7 +847,7 @@ def create_dataset( dataset: Optional[gca_dataset.Dataset] = 
None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Creates a dataset. @@ -864,8 +901,10 @@ def sample_create_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.Dataset: @@ -927,7 +966,7 @@ def get_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> dataset.Dataset: r"""Gets a dataset. @@ -971,8 +1010,10 @@ def sample_get_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1beta1.types.Dataset: @@ -1032,7 +1073,7 @@ def list_datasets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatasetsPager: r"""Lists datasets in a project. @@ -1077,8 +1118,10 @@ def sample_list_datasets(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager: @@ -1150,7 +1193,7 @@ def update_dataset( dataset: Optional[gca_dataset.Dataset] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Updates a dataset. @@ -1198,8 +1241,10 @@ def sample_update_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1beta1.types.Dataset: @@ -1261,7 +1306,7 @@ def delete_dataset( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Deletes a dataset and all of its contents. Returns empty response in the @@ -1313,8 +1358,10 @@ def sample_delete_dataset(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1391,7 +1438,7 @@ def import_data( input_config: Optional[io.InputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Imports data into a dataset. For Tables this method can only be called on an empty Dataset. @@ -1457,8 +1504,10 @@ def sample_import_data(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1537,7 +1586,7 @@ def export_data( output_config: Optional[io.OutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Exports dataset's data to the provided output location. Returns an empty response in the @@ -1595,8 +1644,10 @@ def sample_export_data(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1674,7 +1725,7 @@ def get_annotation_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> annotation_spec.AnnotationSpec: r"""Gets an annotation spec. @@ -1718,8 +1769,10 @@ def sample_get_annotation_spec(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.AnnotationSpec: @@ -1775,7 +1828,7 @@ def get_table_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table_spec.TableSpec: r"""Gets a table spec. @@ -1819,8 +1872,10 @@ def sample_get_table_spec(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.TableSpec: @@ -1886,7 +1941,7 @@ def list_table_specs( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTableSpecsPager: r"""Lists table specs in a dataset. @@ -1931,8 +1986,10 @@ def sample_list_table_specs(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager: @@ -2004,7 +2061,7 @@ def update_table_spec( table_spec: Optional[gca_table_spec.TableSpec] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_table_spec.TableSpec: r"""Updates a table spec. @@ -2047,8 +2104,10 @@ def sample_update_table_spec(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.TableSpec: @@ -2116,7 +2175,7 @@ def get_column_spec( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> column_spec.ColumnSpec: r"""Gets a column spec. @@ -2160,8 +2219,10 @@ def sample_get_column_spec(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.ColumnSpec: @@ -2219,7 +2280,7 @@ def list_column_specs( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListColumnSpecsPager: r"""Lists column specs in a table spec. @@ -2264,8 +2325,10 @@ def sample_list_column_specs(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager: @@ -2337,7 +2400,7 @@ def update_column_spec( column_spec: Optional[gca_column_spec.ColumnSpec] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_column_spec.ColumnSpec: r"""Updates a column spec. @@ -2380,8 +2443,10 @@ def sample_update_column_spec(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.ColumnSpec: @@ -2442,7 +2507,7 @@ def create_model( model: Optional[gca_model.Model] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a model. Returns a Model in the [response][google.longrunning.Operation.response] field when it @@ -2500,8 +2565,10 @@ def sample_create_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2572,7 +2639,7 @@ def get_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model.Model: r"""Gets a model. @@ -2614,8 +2681,10 @@ def sample_get_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.Model: @@ -2673,7 +2742,7 @@ def list_models( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelsPager: r"""Lists models. @@ -2718,8 +2787,10 @@ def sample_list_models(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager: @@ -2791,7 +2862,7 @@ def delete_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Deletes a model. Returns ``google.protobuf.Empty`` in the [response][google.longrunning.Operation.response] field when it @@ -2842,8 +2913,10 @@ def sample_delete_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2919,7 +2992,7 @@ def deploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Deploys a model. If a model is already deployed, deploying it with the same parameters has no effect. Deploying with different @@ -2981,8 +3054,10 @@ def sample_deploy_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3058,7 +3133,7 @@ def undeploy_model( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Undeploys a model. If the model is not deployed this method has no effect. 
@@ -3114,8 +3189,10 @@ def sample_undeploy_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3192,7 +3269,7 @@ def export_model( output_config: Optional[io.ModelExportOutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Exports a trained, "export-able", model to a user specified Google Cloud Storage location. A model is considered export-able @@ -3257,8 +3334,10 @@ def sample_export_model(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: @@ -3337,7 +3416,7 @@ def export_evaluated_examples( output_config: Optional[io.ExportEvaluatedExamplesOutputConfig] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Exports examples on which the model was evaluated (i.e. which were in the TEST set of the dataset the model was created from), @@ -3407,8 +3486,10 @@ def sample_export_evaluated_examples(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3488,7 +3569,7 @@ def get_model_evaluation( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model_evaluation.ModelEvaluation: r"""Gets a model evaluation. @@ -3532,8 +3613,10 @@ def sample_get_model_evaluation(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.ModelEvaluation: @@ -3589,7 +3672,7 @@ def list_model_evaluations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListModelEvaluationsPager: r"""Lists model evaluations. @@ -3637,8 +3720,10 @@ def sample_list_model_evaluations(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/pagers.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/pagers.py index 115fe7f20cbc..36de7fbf3906 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/pagers.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/pagers.py @@ -74,7 +74,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. 
@@ -88,8 +88,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListDatasetsRequest(request) @@ -148,7 +150,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -162,8 +164,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListDatasetsRequest(request) @@ -226,7 +230,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -240,8 +244,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListTableSpecsRequest(request) @@ -300,7 +306,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -314,8 +320,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListTableSpecsRequest(request) @@ -378,7 +386,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -392,8 +400,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListColumnSpecsRequest(request) @@ -452,7 +462,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -466,8 +476,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListColumnSpecsRequest(request) @@ -530,7 +542,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -544,8 +556,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelsRequest(request) @@ -604,7 +618,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -618,8 +632,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelsRequest(request) @@ -682,7 +698,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -696,8 +712,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = service.ListModelEvaluationsRequest(request) @@ -756,7 +774,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -770,8 +788,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = service.ListModelEvaluationsRequest(request) diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py index bedc251aea3a..672e5ce0c5f3 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import json +import logging as std_logging +import pickle from typing import Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -21,7 +24,10 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.automl_v1beta1.types import annotation_spec from google.cloud.automl_v1beta1.types import column_spec @@ -34,6 +40,81 @@ from .base import DEFAULT_CLIENT_INFO, AutoMlTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": 
client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class AutoMlGrpcTransport(AutoMlTransport): """gRPC backend transport for AutoMl. @@ -202,7 +283,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -266,7 +352,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. 
return self._operations_client @@ -290,7 +378,7 @@ def create_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_dataset" not in self._stubs: - self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + self._stubs["create_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/CreateDataset", request_serializer=service.CreateDatasetRequest.serialize, response_deserializer=gca_dataset.Dataset.deserialize, @@ -314,7 +402,7 @@ def get_dataset(self) -> Callable[[service.GetDatasetRequest], dataset.Dataset]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_dataset" not in self._stubs: - self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + self._stubs["get_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetDataset", request_serializer=service.GetDatasetRequest.serialize, response_deserializer=dataset.Dataset.deserialize, @@ -340,7 +428,7 @@ def list_datasets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_datasets" not in self._stubs: - self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + self._stubs["list_datasets"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListDatasets", request_serializer=service.ListDatasetsRequest.serialize, response_deserializer=service.ListDatasetsResponse.deserialize, @@ -366,7 +454,7 @@ def update_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_dataset" not in self._stubs: - self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + self._stubs["update_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UpdateDataset", request_serializer=service.UpdateDatasetRequest.serialize, response_deserializer=gca_dataset.Dataset.deserialize, @@ -396,7 +484,7 @@ def delete_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_dataset" not in self._stubs: - self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + self._stubs["delete_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/DeleteDataset", request_serializer=service.DeleteDatasetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -431,7 +519,7 @@ def import_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "import_data" not in self._stubs: - self._stubs["import_data"] = self.grpc_channel.unary_unary( + self._stubs["import_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ImportData", request_serializer=service.ImportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -460,7 +548,7 @@ def export_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_data" not in self._stubs: - self._stubs["export_data"] = self.grpc_channel.unary_unary( + self._stubs["export_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ExportData", request_serializer=service.ExportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -486,7 +574,7 @@ def get_annotation_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_annotation_spec" not in self._stubs: - self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_annotation_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec", request_serializer=service.GetAnnotationSpecRequest.serialize, response_deserializer=annotation_spec.AnnotationSpec.deserialize, @@ -512,7 +600,7 @@ def get_table_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_table_spec" not in self._stubs: - self._stubs["get_table_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_table_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetTableSpec", request_serializer=service.GetTableSpecRequest.serialize, response_deserializer=table_spec.TableSpec.deserialize, @@ -538,7 +626,7 @@ def list_table_specs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_table_specs" not in self._stubs: - self._stubs["list_table_specs"] = self.grpc_channel.unary_unary( + self._stubs["list_table_specs"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs", request_serializer=service.ListTableSpecsRequest.serialize, response_deserializer=service.ListTableSpecsResponse.deserialize, @@ -564,7 +652,7 @@ def update_table_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_table_spec" not in self._stubs: - self._stubs["update_table_spec"] = self.grpc_channel.unary_unary( + self._stubs["update_table_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec", request_serializer=service.UpdateTableSpecRequest.serialize, response_deserializer=gca_table_spec.TableSpec.deserialize, @@ -590,7 +678,7 @@ def get_column_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_column_spec" not in self._stubs: - self._stubs["get_column_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_column_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec", request_serializer=service.GetColumnSpecRequest.serialize, response_deserializer=column_spec.ColumnSpec.deserialize, @@ -616,7 +704,7 @@ def list_column_specs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_column_specs" not in self._stubs: - self._stubs["list_column_specs"] = self.grpc_channel.unary_unary( + self._stubs["list_column_specs"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs", request_serializer=service.ListColumnSpecsRequest.serialize, response_deserializer=service.ListColumnSpecsResponse.deserialize, @@ -642,7 +730,7 @@ def update_column_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_column_spec" not in self._stubs: - self._stubs["update_column_spec"] = self.grpc_channel.unary_unary( + self._stubs["update_column_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec", request_serializer=service.UpdateColumnSpecRequest.serialize, response_deserializer=gca_column_spec.ColumnSpec.deserialize, @@ -672,7 +760,7 @@ def create_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_model" not in self._stubs: - self._stubs["create_model"] = self.grpc_channel.unary_unary( + self._stubs["create_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/CreateModel", request_serializer=service.CreateModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -696,7 +784,7 @@ def get_model(self) -> Callable[[service.GetModelRequest], model.Model]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model" not in self._stubs: - self._stubs["get_model"] = self.grpc_channel.unary_unary( + self._stubs["get_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetModel", request_serializer=service.GetModelRequest.serialize, response_deserializer=model.Model.deserialize, @@ -722,7 +810,7 @@ def list_models( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_models" not in self._stubs: - self._stubs["list_models"] = self.grpc_channel.unary_unary( + self._stubs["list_models"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListModels", request_serializer=service.ListModelsRequest.serialize, response_deserializer=service.ListModelsResponse.deserialize, @@ -751,7 +839,7 @@ def delete_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_model" not in self._stubs: - self._stubs["delete_model"] = self.grpc_channel.unary_unary( + self._stubs["delete_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/DeleteModel", request_serializer=service.DeleteModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -791,7 +879,7 @@ def deploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "deploy_model" not in self._stubs: - self._stubs["deploy_model"] = self.grpc_channel.unary_unary( + self._stubs["deploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/DeployModel", request_serializer=service.DeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -825,7 +913,7 @@ def undeploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undeploy_model" not in self._stubs: - self._stubs["undeploy_model"] = self.grpc_channel.unary_unary( + self._stubs["undeploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UndeployModel", request_serializer=service.UndeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -859,7 +947,7 @@ def export_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_model" not in self._stubs: - self._stubs["export_model"] = self.grpc_channel.unary_unary( + self._stubs["export_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ExportModel", request_serializer=service.ExportModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -899,7 +987,7 @@ def export_evaluated_examples( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "export_evaluated_examples" not in self._stubs: - self._stubs["export_evaluated_examples"] = self.grpc_channel.unary_unary( + self._stubs["export_evaluated_examples"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples", request_serializer=service.ExportEvaluatedExamplesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -927,7 +1015,7 @@ def get_model_evaluation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model_evaluation" not in self._stubs: - self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + self._stubs["get_model_evaluation"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation", request_serializer=service.GetModelEvaluationRequest.serialize, response_deserializer=model_evaluation.ModelEvaluation.deserialize, @@ -955,7 +1043,7 @@ def list_model_evaluations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_model_evaluations" not in self._stubs: - self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + self._stubs["list_model_evaluations"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations", request_serializer=service.ListModelEvaluationsRequest.serialize, response_deserializer=service.ListModelEvaluationsResponse.deserialize, @@ -963,7 +1051,7 @@ def list_model_evaluations( return self._stubs["list_model_evaluations"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py index 1d95ca5511ef..d4a20bd93014 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. 
# import inspect +import json +import logging as std_logging +import pickle from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -23,8 +26,11 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore from grpc.experimental import aio # type: ignore +import proto # type: ignore from google.cloud.automl_v1beta1.types import annotation_spec from google.cloud.automl_v1beta1.types import column_spec @@ -38,6 +44,82 @@ from .base import DEFAULT_CLIENT_INFO, AutoMlTransport from .grpc import AutoMlGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for 
{client_call_details.method}", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class AutoMlGrpcAsyncIOTransport(AutoMlTransport): """gRPC AsyncIO backend transport for AutoMl. @@ -249,10 +331,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -275,7 +360,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. 
if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -300,7 +385,7 @@ def create_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_dataset" not in self._stubs: - self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + self._stubs["create_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/CreateDataset", request_serializer=service.CreateDatasetRequest.serialize, response_deserializer=gca_dataset.Dataset.deserialize, @@ -326,7 +411,7 @@ def get_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_dataset" not in self._stubs: - self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + self._stubs["get_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetDataset", request_serializer=service.GetDatasetRequest.serialize, response_deserializer=dataset.Dataset.deserialize, @@ -354,7 +439,7 @@ def list_datasets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_datasets" not in self._stubs: - self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + self._stubs["list_datasets"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListDatasets", request_serializer=service.ListDatasetsRequest.serialize, response_deserializer=service.ListDatasetsResponse.deserialize, @@ -380,7 +465,7 @@ def update_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_dataset" not in self._stubs: - self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + self._stubs["update_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UpdateDataset", request_serializer=service.UpdateDatasetRequest.serialize, response_deserializer=gca_dataset.Dataset.deserialize, @@ -410,7 +495,7 @@ def delete_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_dataset" not in self._stubs: - self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + self._stubs["delete_dataset"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/DeleteDataset", request_serializer=service.DeleteDatasetRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -445,7 +530,7 @@ def import_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "import_data" not in self._stubs: - self._stubs["import_data"] = self.grpc_channel.unary_unary( + self._stubs["import_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ImportData", request_serializer=service.ImportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -474,7 +559,7 @@ def export_data( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_data" not in self._stubs: - self._stubs["export_data"] = self.grpc_channel.unary_unary( + self._stubs["export_data"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ExportData", request_serializer=service.ExportDataRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -502,7 +587,7 @@ def get_annotation_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_annotation_spec" not in self._stubs: - self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_annotation_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec", request_serializer=service.GetAnnotationSpecRequest.serialize, response_deserializer=annotation_spec.AnnotationSpec.deserialize, @@ -528,7 +613,7 @@ def get_table_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_table_spec" not in self._stubs: - self._stubs["get_table_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_table_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetTableSpec", request_serializer=service.GetTableSpecRequest.serialize, response_deserializer=table_spec.TableSpec.deserialize, @@ -556,7 +641,7 @@ def list_table_specs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_table_specs" not in self._stubs: - self._stubs["list_table_specs"] = self.grpc_channel.unary_unary( + self._stubs["list_table_specs"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs", request_serializer=service.ListTableSpecsRequest.serialize, response_deserializer=service.ListTableSpecsResponse.deserialize, @@ -584,7 +669,7 @@ def update_table_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_table_spec" not in self._stubs: - self._stubs["update_table_spec"] = self.grpc_channel.unary_unary( + self._stubs["update_table_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec", request_serializer=service.UpdateTableSpecRequest.serialize, response_deserializer=gca_table_spec.TableSpec.deserialize, @@ -610,7 +695,7 @@ def get_column_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_column_spec" not in self._stubs: - self._stubs["get_column_spec"] = self.grpc_channel.unary_unary( + self._stubs["get_column_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec", request_serializer=service.GetColumnSpecRequest.serialize, response_deserializer=column_spec.ColumnSpec.deserialize, @@ -638,7 +723,7 @@ def list_column_specs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_column_specs" not in self._stubs: - self._stubs["list_column_specs"] = self.grpc_channel.unary_unary( + self._stubs["list_column_specs"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs", request_serializer=service.ListColumnSpecsRequest.serialize, response_deserializer=service.ListColumnSpecsResponse.deserialize, @@ -666,7 +751,7 @@ def update_column_spec( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_column_spec" not in self._stubs: - self._stubs["update_column_spec"] = self.grpc_channel.unary_unary( + self._stubs["update_column_spec"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec", request_serializer=service.UpdateColumnSpecRequest.serialize, response_deserializer=gca_column_spec.ColumnSpec.deserialize, @@ -696,7 +781,7 @@ def create_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_model" not in self._stubs: - self._stubs["create_model"] = self.grpc_channel.unary_unary( + self._stubs["create_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/CreateModel", request_serializer=service.CreateModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -720,7 +805,7 @@ def get_model(self) -> Callable[[service.GetModelRequest], Awaitable[model.Model # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model" not in self._stubs: - self._stubs["get_model"] = self.grpc_channel.unary_unary( + self._stubs["get_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetModel", request_serializer=service.GetModelRequest.serialize, response_deserializer=model.Model.deserialize, @@ -746,7 +831,7 @@ def list_models( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_models" not in self._stubs: - self._stubs["list_models"] = self.grpc_channel.unary_unary( + self._stubs["list_models"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListModels", request_serializer=service.ListModelsRequest.serialize, response_deserializer=service.ListModelsResponse.deserialize, @@ -775,7 +860,7 @@ def delete_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_model" not in self._stubs: - self._stubs["delete_model"] = self.grpc_channel.unary_unary( + self._stubs["delete_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/DeleteModel", request_serializer=service.DeleteModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -815,7 +900,7 @@ def deploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "deploy_model" not in self._stubs: - self._stubs["deploy_model"] = self.grpc_channel.unary_unary( + self._stubs["deploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/DeployModel", request_serializer=service.DeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -849,7 +934,7 @@ def undeploy_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undeploy_model" not in self._stubs: - self._stubs["undeploy_model"] = self.grpc_channel.unary_unary( + self._stubs["undeploy_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/UndeployModel", request_serializer=service.UndeployModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -883,7 +968,7 @@ def export_model( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_model" not in self._stubs: - self._stubs["export_model"] = self.grpc_channel.unary_unary( + self._stubs["export_model"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ExportModel", request_serializer=service.ExportModelRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -925,7 +1010,7 @@ def export_evaluated_examples( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "export_evaluated_examples" not in self._stubs: - self._stubs["export_evaluated_examples"] = self.grpc_channel.unary_unary( + self._stubs["export_evaluated_examples"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples", request_serializer=service.ExportEvaluatedExamplesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -953,7 +1038,7 @@ def get_model_evaluation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model_evaluation" not in self._stubs: - self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + self._stubs["get_model_evaluation"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation", request_serializer=service.GetModelEvaluationRequest.serialize, response_deserializer=model_evaluation.ModelEvaluation.deserialize, @@ -982,7 +1067,7 @@ def list_model_evaluations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_model_evaluations" not in self._stubs: - self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + self._stubs["list_model_evaluations"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations", request_serializer=service.ListModelEvaluationsRequest.serialize, response_deserializer=service.ListModelEvaluationsResponse.deserialize, @@ -1240,7 +1325,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py index ee4428d21399..5724fc0eed1b 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - import dataclasses import json # type: ignore +import logging from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings @@ -45,6 +45,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -267,8 +275,10 @@ def post_update_table_spec(self, response): """ def pre_create_dataset( - self, request: service.CreateDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.CreateDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for create_dataset Override in a subclass to manipulate the request or metadata @@ -286,8 +296,10 @@ def post_create_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Data return response def pre_create_model( - self, request: service.CreateModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.CreateModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for create_model Override in a subclass to manipulate the request or metadata @@ -307,8 +319,10 @@ def post_create_model( return response def pre_delete_dataset( - self, request: service.DeleteDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, str]]]: + self, + 
request: service.DeleteDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for delete_dataset Override in a subclass to manipulate the request or metadata @@ -328,8 +342,10 @@ def post_delete_dataset( return response def pre_delete_model( - self, request: service.DeleteModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.DeleteModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for delete_model Override in a subclass to manipulate the request or metadata @@ -349,8 +365,10 @@ def post_delete_model( return response def pre_deploy_model( - self, request: service.DeployModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.DeployModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for deploy_model Override in a subclass to manipulate the request or metadata @@ -370,8 +388,10 @@ def post_deploy_model( return response def pre_export_data( - self, request: service.ExportDataRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ExportDataRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for export_data Override in a subclass to manipulate the request or metadata @@ -393,8 +413,10 @@ def post_export_data( def pre_export_evaluated_examples( self, request: service.ExportEvaluatedExamplesRequest, - metadata: Sequence[Tuple[str, str]], - ) 
-> Tuple[service.ExportEvaluatedExamplesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.ExportEvaluatedExamplesRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for export_evaluated_examples Override in a subclass to manipulate the request or metadata @@ -414,8 +436,10 @@ def post_export_evaluated_examples( return response def pre_export_model( - self, request: service.ExportModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ExportModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for export_model Override in a subclass to manipulate the request or metadata @@ -437,8 +461,10 @@ def post_export_model( def pre_get_annotation_spec( self, request: service.GetAnnotationSpecRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.GetAnnotationSpecRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.GetAnnotationSpecRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_annotation_spec Override in a subclass to manipulate the request or metadata @@ -458,8 +484,10 @@ def post_get_annotation_spec( return response def pre_get_column_spec( - self, request: service.GetColumnSpecRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.GetColumnSpecRequest, Sequence[Tuple[str, str]]]: + self, + request: service.GetColumnSpecRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.GetColumnSpecRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_column_spec Override in a subclass to manipulate the request or metadata @@ -479,8 +507,10 @@ def post_get_column_spec( return response def pre_get_dataset( - self, 
request: service.GetDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.GetDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_dataset Override in a subclass to manipulate the request or metadata @@ -498,8 +528,10 @@ def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: return response def pre_get_model( - self, request: service.GetModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.GetModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_model Override in a subclass to manipulate the request or metadata @@ -519,8 +551,10 @@ def post_get_model(self, response: model.Model) -> model.Model: def pre_get_model_evaluation( self, request: service.GetModelEvaluationRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.GetModelEvaluationRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.GetModelEvaluationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_model_evaluation Override in a subclass to manipulate the request or metadata @@ -540,8 +574,10 @@ def post_get_model_evaluation( return response def pre_get_table_spec( - self, request: service.GetTableSpecRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.GetTableSpecRequest, Sequence[Tuple[str, str]]]: + self, + request: service.GetTableSpecRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.GetTableSpecRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_table_spec Override in a 
subclass to manipulate the request or metadata @@ -561,8 +597,10 @@ def post_get_table_spec( return response def pre_import_data( - self, request: service.ImportDataRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ImportDataRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for import_data Override in a subclass to manipulate the request or metadata @@ -584,8 +622,8 @@ def post_import_data( def pre_list_column_specs( self, request: service.ListColumnSpecsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.ListColumnSpecsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ListColumnSpecsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_column_specs Override in a subclass to manipulate the request or metadata @@ -605,8 +643,10 @@ def post_list_column_specs( return response def pre_list_datasets( - self, request: service.ListDatasetsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ListDatasetsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_datasets Override in a subclass to manipulate the request or metadata @@ -628,8 +668,10 @@ def post_list_datasets( def pre_list_model_evaluations( self, request: service.ListModelEvaluationsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.ListModelEvaluationsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.ListModelEvaluationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for 
list_model_evaluations Override in a subclass to manipulate the request or metadata @@ -649,8 +691,10 @@ def post_list_model_evaluations( return response def pre_list_models( - self, request: service.ListModelsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, str]]]: + self, + request: service.ListModelsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_models Override in a subclass to manipulate the request or metadata @@ -672,8 +716,8 @@ def post_list_models( def pre_list_table_specs( self, request: service.ListTableSpecsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.ListTableSpecsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.ListTableSpecsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_table_specs Override in a subclass to manipulate the request or metadata @@ -693,8 +737,10 @@ def post_list_table_specs( return response def pre_undeploy_model( - self, request: service.UndeployModelRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, str]]]: + self, + request: service.UndeployModelRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for undeploy_model Override in a subclass to manipulate the request or metadata @@ -716,8 +762,10 @@ def post_undeploy_model( def pre_update_column_spec( self, request: service.UpdateColumnSpecRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.UpdateColumnSpecRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + service.UpdateColumnSpecRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc 
interceptor for update_column_spec Override in a subclass to manipulate the request or metadata @@ -737,8 +785,10 @@ def post_update_column_spec( return response def pre_update_dataset( - self, request: service.UpdateDatasetRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, str]]]: + self, + request: service.UpdateDatasetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_dataset Override in a subclass to manipulate the request or metadata @@ -758,8 +808,8 @@ def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Data def pre_update_table_spec( self, request: service.UpdateTableSpecRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[service.UpdateTableSpecRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[service.UpdateTableSpecRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_table_spec Override in a subclass to manipulate the request or metadata @@ -973,7 +1023,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Call the create dataset method over HTTP. @@ -984,8 +1034,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.gca_dataset.Dataset: @@ -999,6 +1051,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseCreateDataset._get_http_options() ) + request, metadata = self._interceptor.pre_create_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseCreateDataset._get_transcoded_request( @@ -1017,6 +1070,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.CreateDataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "CreateDataset", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._CreateDataset._get_response( self._host, @@ -1038,7 +1118,29 @@ def __call__( pb_resp = gca_dataset.Dataset.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_dataset.Dataset.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.create_dataset", + extra={ + "serviceName": 
"google.cloud.automl.v1beta1.AutoMl", + "rpcName": "CreateDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateModel(_BaseAutoMlRestTransport._BaseCreateModel, AutoMlRestStub): @@ -1074,7 +1176,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create model method over HTTP. @@ -1085,8 +1187,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1097,6 +1201,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseCreateModel._get_http_options() + request, metadata = self._interceptor.pre_create_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseCreateModel._get_transcoded_request( @@ -1115,6 +1220,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.CreateModel", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "CreateModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._CreateModel._get_response( self._host, @@ -1134,7 +1266,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.create_model", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "CreateModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp 
class _DeleteDataset(_BaseAutoMlRestTransport._BaseDeleteDataset, AutoMlRestStub): @@ -1169,7 +1323,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the delete dataset method over HTTP. @@ -1180,8 +1334,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1194,6 +1350,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseDeleteDataset._get_http_options() ) + request, metadata = self._interceptor.pre_delete_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseDeleteDataset._get_transcoded_request( @@ -1208,6 +1365,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.DeleteDataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "DeleteDataset", + "httpRequest": http_request, + 
"metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._DeleteDataset._get_response( self._host, @@ -1226,7 +1410,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.delete_dataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "DeleteDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteModel(_BaseAutoMlRestTransport._BaseDeleteModel, AutoMlRestStub): @@ -1261,7 +1467,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the delete model method over HTTP. @@ -1272,8 +1478,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1284,6 +1492,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseDeleteModel._get_http_options() + request, metadata = self._interceptor.pre_delete_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseDeleteModel._get_transcoded_request( @@ -1298,6 +1507,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.DeleteModel", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "DeleteModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._DeleteModel._get_response( self._host, @@ -1316,7 +1552,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.delete_model", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "DeleteModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp 
class _DeployModel(_BaseAutoMlRestTransport._BaseDeployModel, AutoMlRestStub): @@ -1352,7 +1610,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the deploy model method over HTTP. @@ -1363,8 +1621,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1375,6 +1635,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseDeployModel._get_http_options() + request, metadata = self._interceptor.pre_deploy_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseDeployModel._get_transcoded_request( @@ -1393,6 +1654,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.DeployModel", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "DeployModel", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._DeployModel._get_response( self._host, @@ -1412,7 +1700,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_deploy_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.deploy_model", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "DeployModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExportData(_BaseAutoMlRestTransport._BaseExportData, AutoMlRestStub): @@ -1448,7 +1758,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the export data method over HTTP. @@ -1459,8 +1769,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1471,6 +1783,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseExportData._get_http_options() + request, metadata = self._interceptor.pre_export_data(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseExportData._get_transcoded_request( @@ -1489,6 +1802,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ExportData", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ExportData", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ExportData._get_response( self._host, @@ -1508,7 +1848,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_data(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.export_data", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ExportData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_ExportEvaluatedExamples( @@ -1546,7 +1908,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the export evaluated examples method over HTTP. @@ -1557,8 +1919,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1571,6 +1935,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseExportEvaluatedExamples._get_http_options() ) + request, metadata = self._interceptor.pre_export_evaluated_examples( request, metadata ) @@ -1587,6 +1952,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ExportEvaluatedExamples", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ExportEvaluatedExamples", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = 
AutoMlRestTransport._ExportEvaluatedExamples._get_response( self._host, @@ -1606,7 +1998,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_evaluated_examples(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.export_evaluated_examples", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ExportEvaluatedExamples", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExportModel(_BaseAutoMlRestTransport._BaseExportModel, AutoMlRestStub): @@ -1642,7 +2056,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the export model method over HTTP. @@ -1655,8 +2069,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1667,6 +2083,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseExportModel._get_http_options() + request, metadata = self._interceptor.pre_export_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseExportModel._get_transcoded_request( @@ -1685,6 +2102,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ExportModel", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ExportModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ExportModel._get_response( self._host, @@ -1704,7 +2148,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.export_model", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ExportModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp 
class _GetAnnotationSpec( @@ -1741,7 +2207,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> annotation_spec.AnnotationSpec: r"""Call the get annotation spec method over HTTP. @@ -1752,8 +2218,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.annotation_spec.AnnotationSpec: @@ -1763,6 +2231,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseGetAnnotationSpec._get_http_options() ) + request, metadata = self._interceptor.pre_get_annotation_spec( request, metadata ) @@ -1779,6 +2248,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.GetAnnotationSpec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetAnnotationSpec", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = 
AutoMlRestTransport._GetAnnotationSpec._get_response( self._host, @@ -1799,7 +2295,29 @@ def __call__( pb_resp = annotation_spec.AnnotationSpec.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_annotation_spec(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = annotation_spec.AnnotationSpec.to_json(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.get_annotation_spec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetAnnotationSpec", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetColumnSpec(_BaseAutoMlRestTransport._BaseGetColumnSpec, AutoMlRestStub): @@ -1834,7 +2352,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> column_spec.ColumnSpec: r"""Call the get column spec method over HTTP. @@ -1845,8 +2363,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`.
Returns: ~.column_spec.ColumnSpec: @@ -1861,6 +2381,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseGetColumnSpec._get_http_options() ) + request, metadata = self._interceptor.pre_get_column_spec(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseGetColumnSpec._get_transcoded_request( @@ -1875,6 +2396,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.GetColumnSpec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetColumnSpec", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetColumnSpec._get_response( self._host, @@ -1895,7 +2443,29 @@ def __call__( pb_resp = column_spec.ColumnSpec.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_column_spec(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = column_spec.ColumnSpec.to_json(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.get_column_spec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetColumnSpec", + "metadata": http_response["headers"], + "httpResponse": http_response, +
) return resp class _GetDataset(_BaseAutoMlRestTransport._BaseGetDataset, AutoMlRestStub): @@ -1930,7 +2500,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> dataset.Dataset: r"""Call the get dataset method over HTTP. @@ -1941,8 +2511,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.dataset.Dataset: @@ -1954,6 +2526,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseGetDataset._get_http_options() + request, metadata = self._interceptor.pre_get_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseGetDataset._get_transcoded_request( @@ -1968,6 +2541,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.GetDataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetDataset", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetDataset._get_response( self._host, @@ -1988,7 +2588,29 @@ def __call__( pb_resp = dataset.Dataset.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = dataset.Dataset.to_json(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.get_dataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetModel(_BaseAutoMlRestTransport._BaseGetModel, AutoMlRestStub): @@ -2023,7 +2645,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model.Model: r"""Call the get model method over HTTP. @@ -2034,8 +2656,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`.
Returns: ~.model.Model: @@ -2045,6 +2669,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseGetModel._get_http_options() + request, metadata = self._interceptor.pre_get_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseGetModel._get_transcoded_request( @@ -2059,6 +2684,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.GetModel", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetModel", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetModel._get_response( self._host, @@ -2079,7 +2731,29 @@ def __call__( pb_resp = model.Model.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = model.Model.to_json(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.get_model", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetModelEvaluation( @@ -2116,7 +2790,7 @@ def __call__( *,
retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> model_evaluation.ModelEvaluation: r"""Call the get model evaluation method over HTTP. @@ -2127,8 +2801,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.model_evaluation.ModelEvaluation: @@ -2138,6 +2814,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseGetModelEvaluation._get_http_options() ) + request, metadata = self._interceptor.pre_get_model_evaluation( request, metadata ) @@ -2152,6 +2829,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.GetModelEvaluation", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetModelEvaluation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetModelEvaluation._get_response( self._host, @@ -2172,7 +2876,31 @@ def 
__call__( pb_resp = model_evaluation.ModelEvaluation.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model_evaluation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = model_evaluation.ModelEvaluation.to_json( + resp + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.get_model_evaluation", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetModelEvaluation", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetTableSpec(_BaseAutoMlRestTransport._BaseGetTableSpec, AutoMlRestStub): @@ -2207,7 +2935,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table_spec.TableSpec: r"""Call the get table spec method over HTTP. @@ -2218,8 +2946,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`.
Returns: ~.table_spec.TableSpec: @@ -2240,6 +2970,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseGetTableSpec._get_http_options() ) + request, metadata = self._interceptor.pre_get_table_spec(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseGetTableSpec._get_transcoded_request( @@ -2254,6 +2985,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.GetTableSpec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetTableSpec", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._GetTableSpec._get_response( self._host, @@ -2274,7 +3032,29 @@ def __call__( pb_resp = table_spec.TableSpec.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table_spec(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table_spec.TableSpec.to_json(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.get_table_spec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "GetTableSpec", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp
class _ImportData(_BaseAutoMlRestTransport._BaseImportData, AutoMlRestStub): @@ -2310,7 +3090,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the import data method over HTTP. @@ -2321,8 +3101,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -2333,6 +3115,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseImportData._get_http_options() + request, metadata = self._interceptor.pre_import_data(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseImportData._get_transcoded_request( @@ -2351,6 +3134,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ImportData", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ImportData", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ImportData._get_response( self._host, @@ -2370,7 +3180,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_import_data(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.import_data", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ImportData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListColumnSpecs( @@ -2407,7 +3239,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListColumnSpecsResponse: r"""Call the list column specs method over HTTP. @@ -2418,8 +3250,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.service.ListColumnSpecsResponse: @@ -2431,6 +3265,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseListColumnSpecs._get_http_options() ) + request, metadata = self._interceptor.pre_list_column_specs( request, metadata ) @@ -2447,6 +3282,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ListColumnSpecs", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListColumnSpecs", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListColumnSpecs._get_response( self._host, @@ -2467,7 +3329,29 @@ def __call__( pb_resp = service.ListColumnSpecsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_column_specs(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListColumnSpecsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.list_column_specs", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListColumnSpecs", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_ListDatasets(_BaseAutoMlRestTransport._BaseListDatasets, AutoMlRestStub): @@ -2502,7 +3386,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListDatasetsResponse: r"""Call the list datasets method over HTTP. @@ -2513,8 +3397,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.service.ListDatasetsResponse: @@ -2526,6 +3412,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseListDatasets._get_http_options() ) + request, metadata = self._interceptor.pre_list_datasets(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseListDatasets._get_transcoded_request( @@ -2540,6 +3427,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ListDatasets", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListDatasets", + "httpRequest": http_request, + 
"metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListDatasets._get_response( self._host, @@ -2560,7 +3474,29 @@ def __call__( pb_resp = service.ListDatasetsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_datasets(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListDatasetsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.list_datasets", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListDatasets", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListModelEvaluations( @@ -2597,7 +3533,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListModelEvaluationsResponse: r"""Call the list model evaluations method over HTTP. @@ -2608,8 +3544,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.service.ListModelEvaluationsResponse: @@ -2621,6 +3559,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseListModelEvaluations._get_http_options() ) + request, metadata = self._interceptor.pre_list_model_evaluations( request, metadata ) @@ -2633,6 +3572,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ListModelEvaluations", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListModelEvaluations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListModelEvaluations._get_response( self._host, @@ -2653,7 +3619,31 @@ def __call__( pb_resp = service.ListModelEvaluationsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_model_evaluations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListModelEvaluationsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.list_model_evaluations", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListModelEvaluations", + "metadata": http_response["headers"], + 
"httpResponse": http_response, + }, + ) return resp class _ListModels(_BaseAutoMlRestTransport._BaseListModels, AutoMlRestStub): @@ -2688,7 +3678,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListModelsResponse: r"""Call the list models method over HTTP. @@ -2699,8 +3689,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.service.ListModelsResponse: @@ -2710,6 +3702,7 @@ def __call__( """ http_options = _BaseAutoMlRestTransport._BaseListModels._get_http_options() + request, metadata = self._interceptor.pre_list_models(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseListModels._get_transcoded_request( @@ -2724,6 +3717,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ListModels", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListModels", + 
"httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListModels._get_response( self._host, @@ -2744,7 +3764,29 @@ def __call__( pb_resp = service.ListModelsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_models(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListModelsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.list_models", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListModels", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListTableSpecs(_BaseAutoMlRestTransport._BaseListTableSpecs, AutoMlRestStub): @@ -2779,7 +3821,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> service.ListTableSpecsResponse: r"""Call the list table specs method over HTTP. @@ -2790,8 +3832,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.service.ListTableSpecsResponse: @@ -2803,6 +3847,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseListTableSpecs._get_http_options() ) + request, metadata = self._interceptor.pre_list_table_specs( request, metadata ) @@ -2819,6 +3864,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.ListTableSpecs", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListTableSpecs", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._ListTableSpecs._get_response( self._host, @@ -2839,7 +3911,29 @@ def __call__( pb_resp = service.ListTableSpecsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_table_specs(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = service.ListTableSpecsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.list_table_specs", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "ListTableSpecs", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_UndeployModel(_BaseAutoMlRestTransport._BaseUndeployModel, AutoMlRestStub): @@ -2875,7 +3969,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the undeploy model method over HTTP. @@ -2886,8 +3980,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -2900,6 +3996,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseUndeployModel._get_http_options() ) + request, metadata = self._interceptor.pre_undeploy_model(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseUndeployModel._get_transcoded_request( @@ -2918,6 +4015,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.UndeployModel", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UndeployModel", + "httpRequest": http_request, + 
"metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UndeployModel._get_response( self._host, @@ -2937,7 +4061,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undeploy_model(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.undeploy_model", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UndeployModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateColumnSpec( @@ -2975,7 +4121,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_column_spec.ColumnSpec: r"""Call the update column spec method over HTTP. @@ -2986,8 +4132,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.gca_column_spec.ColumnSpec: @@ -3002,6 +4150,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseUpdateColumnSpec._get_http_options() ) + request, metadata = self._interceptor.pre_update_column_spec( request, metadata ) @@ -3024,6 +4173,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.UpdateColumnSpec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UpdateColumnSpec", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UpdateColumnSpec._get_response( self._host, @@ -3045,7 +4221,29 @@ def __call__( pb_resp = gca_column_spec.ColumnSpec.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_column_spec(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_column_spec.ColumnSpec.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.update_column_spec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UpdateColumnSpec", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class 
_UpdateDataset(_BaseAutoMlRestTransport._BaseUpdateDataset, AutoMlRestStub): @@ -3081,7 +4279,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_dataset.Dataset: r"""Call the update dataset method over HTTP. @@ -3092,8 +4290,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.gca_dataset.Dataset: @@ -3107,6 +4307,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseUpdateDataset._get_http_options() ) + request, metadata = self._interceptor.pre_update_dataset(request, metadata) transcoded_request = ( _BaseAutoMlRestTransport._BaseUpdateDataset._get_transcoded_request( @@ -3125,6 +4326,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.UpdateDataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UpdateDataset", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UpdateDataset._get_response( self._host, @@ -3146,7 +4374,29 @@ def __call__( pb_resp = gca_dataset.Dataset.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_dataset(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_dataset.Dataset.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.update_dataset", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UpdateDataset", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateTableSpec( @@ -3184,7 +4434,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gca_table_spec.TableSpec: r"""Call the update table spec method over HTTP. @@ -3195,8 +4445,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.gca_table_spec.TableSpec: @@ -3217,6 +4469,7 @@ def __call__( http_options = ( _BaseAutoMlRestTransport._BaseUpdateTableSpec._get_http_options() ) + request, metadata = self._interceptor.pre_update_table_spec( request, metadata ) @@ -3237,6 +4490,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.AutoMlClient.UpdateTableSpec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UpdateTableSpec", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = AutoMlRestTransport._UpdateTableSpec._get_response( self._host, @@ -3258,7 +4538,29 @@ def __call__( pb_resp = gca_table_spec.TableSpec.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table_spec(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_table_spec.TableSpec.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.AutoMlClient.update_table_spec", + extra={ + "serviceName": "google.cloud.automl.v1beta1.AutoMl", + "rpcName": "UpdateTableSpec", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git 
a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py index b8ad20c31516..a6fb80b73b8d 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py @@ -14,6 +14,7 @@ # limitations under the License. # from collections import OrderedDict +import logging as std_logging import re from typing import ( Callable, @@ -57,6 +58,15 @@ from .transports.base import DEFAULT_CLIENT_INFO, PredictionServiceTransport from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class PredictionServiceAsyncClient: """AutoML Prediction API. 
@@ -270,6 +280,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1beta1.PredictionServiceAsyncClient`.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "credentialsType": None, + }, + ) + async def predict( self, request: Optional[Union[prediction_service.PredictRequest, dict]] = None, @@ -279,7 +311,7 @@ async def predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> prediction_service.PredictResponse: r"""Perform an online prediction. The prediction result will be directly returned in the response. Available for following ML @@ -383,8 +415,10 @@ async def sample_predict(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.automl_v1beta1.types.PredictResponse: @@ -451,7 +485,7 @@ async def batch_predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], @@ -618,8 +652,10 @@ async def sample_batch_predict(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/client.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/client.py index 23ab0d0fe0ee..ae7e7884258f 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/client.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/client.py @@ -14,6 +14,7 @@ # limitations under the License. 
# from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -48,6 +49,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -594,6 +604,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -660,6 +674,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.automl_v1beta1.PredictionServiceClient`.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "credentialsType": None, + }, + ) + def predict( self, request: Optional[Union[prediction_service.PredictRequest, dict]] = None, @@ -669,7 +706,7 @@ def predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry 
= gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> prediction_service.PredictResponse: r"""Perform an online prediction. The prediction result will be directly returned in the response. Available for following ML @@ -773,8 +810,10 @@ def sample_predict(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.automl_v1beta1.types.PredictResponse: @@ -839,7 +878,7 @@ def batch_predict( params: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], @@ -1006,8 +1045,10 @@ def sample_batch_predict(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py index 27233b6d79dd..cf284c2781c6 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle from typing import Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -21,12 +24,90 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.automl_v1beta1.types import prediction_service from .base import DEFAULT_CLIENT_INFO, PredictionServiceTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif 
isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class PredictionServiceGrpcTransport(PredictionServiceTransport): """gRPC backend transport for PredictionService. @@ -184,7 +265,12 @@ def __init__( ], ) - # Wrap messages. 
This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -248,7 +334,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -294,7 +382,7 @@ def predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "predict" not in self._stubs: - self._stubs["predict"] = self.grpc_channel.unary_unary( + self._stubs["predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.PredictionService/Predict", request_serializer=prediction_service.PredictRequest.serialize, response_deserializer=prediction_service.PredictResponse.deserialize, @@ -336,7 +424,7 @@ def batch_predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_predict" not in self._stubs: - self._stubs["batch_predict"] = self.grpc_channel.unary_unary( + self._stubs["batch_predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.PredictionService/BatchPredict", request_serializer=prediction_service.BatchPredictRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -344,7 +432,7 @@ def batch_predict( return self._stubs["batch_predict"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py index 7837e7ca5c9c..af48008c8c81 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. 
# import inspect +import json +import logging as std_logging +import pickle from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union import warnings @@ -23,14 +26,93 @@ from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore from grpc.experimental import aio # type: ignore +import proto # type: ignore from google.cloud.automl_v1beta1.types import prediction_service from .base import DEFAULT_CLIENT_INFO, PredictionServiceTransport from .grpc import PredictionServiceGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + 
"serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): """gRPC AsyncIO backend transport for PredictionService. @@ -231,10 +313,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -257,7 +342,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. 
if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -305,7 +390,7 @@ def predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "predict" not in self._stubs: - self._stubs["predict"] = self.grpc_channel.unary_unary( + self._stubs["predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.PredictionService/Predict", request_serializer=prediction_service.PredictRequest.serialize, response_deserializer=prediction_service.PredictResponse.deserialize, @@ -349,7 +434,7 @@ def batch_predict( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_predict" not in self._stubs: - self._stubs["batch_predict"] = self.grpc_channel.unary_unary( + self._stubs["batch_predict"] = self._logged_channel.unary_unary( "/google.cloud.automl.v1beta1.PredictionService/BatchPredict", request_serializer=prediction_service.BatchPredictRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -377,7 +462,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py index 52e2705b419b..342dd9cbcfa6 100644 --- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py +++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations 
under the License. # - import dataclasses import json # type: ignore +import logging from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings @@ -38,6 +38,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -86,8 +94,10 @@ def post_predict(self, response): def pre_batch_predict( self, request: prediction_service.BatchPredictRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[prediction_service.BatchPredictRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + prediction_service.BatchPredictRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for batch_predict Override in a subclass to manipulate the request or metadata @@ -109,8 +119,10 @@ def post_batch_predict( def pre_predict( self, request: prediction_service.PredictRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[prediction_service.PredictRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + prediction_service.PredictRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for predict Override in a subclass to manipulate the request or metadata @@ -315,7 +327,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the batch predict method over HTTP. 
@@ -326,8 +338,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -340,6 +354,7 @@ def __call__( http_options = ( _BasePredictionServiceRestTransport._BaseBatchPredict._get_http_options() ) + request, metadata = self._interceptor.pre_batch_predict(request, metadata) transcoded_request = _BasePredictionServiceRestTransport._BaseBatchPredict._get_transcoded_request( http_options, request @@ -354,6 +369,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.PredictionServiceClient.BatchPredict", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": "BatchPredict", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = PredictionServiceRestTransport._BatchPredict._get_response( self._host, @@ -373,7 +415,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) 
+ resp = self._interceptor.post_batch_predict(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.PredictionServiceClient.batch_predict", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": "BatchPredict", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _Predict( @@ -411,7 +475,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> prediction_service.PredictResponse: r"""Call the predict method over HTTP. @@ -422,8 +486,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.prediction_service.PredictResponse: @@ -435,6 +501,7 @@ def __call__( http_options = ( _BasePredictionServiceRestTransport._BasePredict._get_http_options() ) + request, metadata = self._interceptor.pre_predict(request, metadata) transcoded_request = _BasePredictionServiceRestTransport._BasePredict._get_transcoded_request( http_options, request @@ -453,6 +520,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.automl_v1beta1.PredictionServiceClient.Predict", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": "Predict", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = PredictionServiceRestTransport._Predict._get_response( self._host, @@ -474,7 +568,31 @@ def __call__( pb_resp = prediction_service.PredictResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_predict(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = prediction_service.PredictResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.automl_v1beta1.PredictionServiceClient.predict", + extra={ + "serviceName": "google.cloud.automl.v1beta1.PredictionService", + "rpcName": "Predict", + 
"metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json index 49126fa3a89d..0d8816baffc0 100644 --- a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json +++ b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-automl", - "version": "2.14.1" + "version": "0.1.0" }, "snippets": [ { @@ -51,7 +51,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -135,7 +135,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -220,7 +220,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -304,7 +304,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -385,7 +385,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -465,7 +465,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -546,7 +546,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, 
Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -626,7 +626,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -707,7 +707,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -787,7 +787,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -872,7 +872,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -956,7 +956,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -1041,7 +1041,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -1125,7 +1125,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -1206,7 +1206,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.AnnotationSpec", @@ -1286,7 +1286,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.AnnotationSpec", @@ -1367,7 +1367,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, 
Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Dataset", @@ -1447,7 +1447,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Dataset", @@ -1528,7 +1528,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.ModelEvaluation", @@ -1608,7 +1608,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.ModelEvaluation", @@ -1689,7 +1689,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Model", @@ -1769,7 +1769,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Model", @@ -1854,7 +1854,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -1938,7 +1938,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -2019,7 +2019,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager", @@ -2099,7 +2099,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager", @@ -2184,7 +2184,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, 
str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager", @@ -2268,7 +2268,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager", @@ -2349,7 +2349,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager", @@ -2429,7 +2429,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager", @@ -2510,7 +2510,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -2590,7 +2590,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -2675,7 +2675,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Dataset", @@ -2759,7 +2759,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Dataset", @@ -2844,7 +2844,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.Model", @@ -2928,7 +2928,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": 
"google.cloud.automl_v1.types.Model", @@ -3021,7 +3021,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -3113,7 +3113,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -3202,7 +3202,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.PredictResponse", @@ -3290,7 +3290,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1.types.PredictResponse", diff --git a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json index a123a7bf2218..3957ec9b1b9f 100644 --- a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json +++ b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-automl", - "version": "2.14.1" + "version": "0.1.0" }, "snippets": [ { @@ -51,7 +51,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Dataset", @@ -135,7 +135,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Dataset", @@ -220,7 +220,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], 
"resultType": "google.api_core.operation_async.AsyncOperation", @@ -304,7 +304,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -385,7 +385,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -465,7 +465,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -546,7 +546,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -626,7 +626,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -707,7 +707,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -787,7 +787,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -872,7 +872,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -956,7 +956,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -1041,7 +1041,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": 
"google.api_core.operation_async.AsyncOperation", @@ -1125,7 +1125,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -1210,7 +1210,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -1294,7 +1294,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -1375,7 +1375,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.AnnotationSpec", @@ -1455,7 +1455,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.AnnotationSpec", @@ -1536,7 +1536,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", @@ -1616,7 +1616,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", @@ -1697,7 +1697,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Dataset", @@ -1777,7 +1777,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Dataset", @@ -1858,7 +1858,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" 
} ], "resultType": "google.cloud.automl_v1beta1.types.ModelEvaluation", @@ -1938,7 +1938,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.ModelEvaluation", @@ -2019,7 +2019,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Model", @@ -2099,7 +2099,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Model", @@ -2180,7 +2180,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.TableSpec", @@ -2260,7 +2260,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.TableSpec", @@ -2345,7 +2345,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -2429,7 +2429,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -2510,7 +2510,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager", @@ -2590,7 +2590,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager", @@ -2671,7 +2671,7 @@ }, { "name": "metadata", - 
"type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager", @@ -2751,7 +2751,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager", @@ -2832,7 +2832,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager", @@ -2912,7 +2912,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager", @@ -2993,7 +2993,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager", @@ -3073,7 +3073,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager", @@ -3154,7 +3154,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager", @@ -3234,7 +3234,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager", @@ -3315,7 +3315,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": 
"google.api_core.operation_async.AsyncOperation", @@ -3395,7 +3395,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -3476,7 +3476,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", @@ -3556,7 +3556,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", @@ -3637,7 +3637,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Dataset", @@ -3717,7 +3717,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.Dataset", @@ -3798,7 +3798,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.TableSpec", @@ -3878,7 +3878,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.TableSpec", @@ -3971,7 +3971,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -4063,7 +4063,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -4152,7 +4152,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], 
"resultType": "google.cloud.automl_v1beta1.types.PredictResponse", @@ -4240,7 +4240,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.automl_v1beta1.types.PredictResponse", diff --git a/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py b/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py index b93fc9fb0fbc..bed20847bf91 100644 --- a/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py +++ b/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py @@ -7922,6 +7922,7 @@ def test_create_dataset_rest_required_fields(request_type=service.CreateDatasetR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_dataset(request) @@ -7978,6 +7979,7 @@ def test_create_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_dataset(**mock_args) @@ -8111,6 +8113,7 @@ def test_get_dataset_rest_required_fields(request_type=service.GetDatasetRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_dataset(request) @@ -8156,6 +8159,7 @@ def test_get_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_dataset(**mock_args) @@ -8292,6 +8296,7 @@ def 
test_list_datasets_rest_required_fields(request_type=service.ListDatasetsReq response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_datasets(request) @@ -8346,6 +8351,7 @@ def test_list_datasets_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_datasets(**mock_args) @@ -8533,6 +8539,7 @@ def test_update_dataset_rest_required_fields(request_type=service.UpdateDatasetR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_dataset(request) @@ -8593,6 +8600,7 @@ def test_update_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_dataset(**mock_args) @@ -8728,6 +8736,7 @@ def test_delete_dataset_rest_required_fields(request_type=service.DeleteDatasetR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_dataset(request) @@ -8771,6 +8780,7 @@ def test_delete_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_dataset(**mock_args) @@ -8901,6 +8911,7 @@ def 
test_import_data_rest_required_fields(request_type=service.ImportDataRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.import_data(request) @@ -8955,6 +8966,7 @@ def test_import_data_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.import_data(**mock_args) @@ -9089,6 +9101,7 @@ def test_export_data_rest_required_fields(request_type=service.ExportDataRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_data(request) @@ -9145,6 +9158,7 @@ def test_export_data_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_data(**mock_args) @@ -9285,6 +9299,7 @@ def test_get_annotation_spec_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_annotation_spec(request) @@ -9332,6 +9347,7 @@ def test_get_annotation_spec_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_annotation_spec(**mock_args) @@ -9463,6 +9479,7 @@ def test_create_model_rest_required_fields(request_type=service.CreateModelReque response_value._content 
= json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_model(request) @@ -9519,6 +9536,7 @@ def test_create_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_model(**mock_args) @@ -9652,6 +9670,7 @@ def test_get_model_rest_required_fields(request_type=service.GetModelRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model(request) @@ -9697,6 +9716,7 @@ def test_get_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model(**mock_args) @@ -9833,6 +9853,7 @@ def test_list_models_rest_required_fields(request_type=service.ListModelsRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_models(request) @@ -9887,6 +9908,7 @@ def test_list_models_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_models(**mock_args) @@ -10077,6 +10099,7 @@ def test_delete_model_rest_required_fields(request_type=service.DeleteModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers 
= {"header-1": "value-1", "header-2": "value-2"} response = client.delete_model(request) @@ -10120,6 +10143,7 @@ def test_delete_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_model(**mock_args) @@ -10246,6 +10270,7 @@ def test_update_model_rest_required_fields(request_type=service.UpdateModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_model(request) @@ -10306,6 +10331,7 @@ def test_update_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_model(**mock_args) @@ -10442,6 +10468,7 @@ def test_deploy_model_rest_required_fields(request_type=service.DeployModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.deploy_model(request) @@ -10485,6 +10512,7 @@ def test_deploy_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.deploy_model(**mock_args) @@ -10616,6 +10644,7 @@ def test_undeploy_model_rest_required_fields(request_type=service.UndeployModelR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = 
client.undeploy_model(request) @@ -10659,6 +10688,7 @@ def test_undeploy_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undeploy_model(**mock_args) @@ -10790,6 +10820,7 @@ def test_export_model_rest_required_fields(request_type=service.ExportModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_model(request) @@ -10846,6 +10877,7 @@ def test_export_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_model(**mock_args) @@ -10986,6 +11018,7 @@ def test_get_model_evaluation_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model_evaluation(request) @@ -11033,6 +11066,7 @@ def test_get_model_evaluation_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model_evaluation(**mock_args) @@ -11184,6 +11218,7 @@ def test_list_model_evaluations_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_model_evaluations(request) @@ -11250,6 +11285,7 @@ def 
test_list_model_evaluations_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_model_evaluations(**mock_args) @@ -12368,6 +12404,7 @@ def test_create_dataset_rest_bad_request(request_type=service.CreateDatasetReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_dataset(request) @@ -12483,6 +12520,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_dataset(request) # Establish that the response is the type that we expect. 
@@ -12520,6 +12558,7 @@ def test_create_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -12562,6 +12601,7 @@ def test_get_dataset_rest_bad_request(request_type=service.GetDatasetRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_dataset(request) @@ -12601,6 +12641,7 @@ def test_get_dataset_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_dataset(request) # Establish that the response is the type that we expect. 
@@ -12641,6 +12682,7 @@ def test_get_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = dataset.Dataset.to_json(dataset.Dataset()) req.return_value.content = return_value @@ -12683,6 +12725,7 @@ def test_list_datasets_rest_bad_request(request_type=service.ListDatasetsRequest response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_datasets(request) @@ -12718,6 +12761,7 @@ def test_list_datasets_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_datasets(request) # Establish that the response is the type that we expect. 
@@ -12754,6 +12798,7 @@ def test_list_datasets_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListDatasetsResponse.to_json( service.ListDatasetsResponse() ) @@ -12800,6 +12845,7 @@ def test_update_dataset_rest_bad_request(request_type=service.UpdateDatasetReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_dataset(request) @@ -12926,6 +12972,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_dataset(request) # Establish that the response is the type that we expect. 
@@ -12966,6 +13013,7 @@ def test_update_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) req.return_value.content = return_value @@ -13008,6 +13056,7 @@ def test_delete_dataset_rest_bad_request(request_type=service.DeleteDatasetReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_dataset(request) @@ -13038,6 +13087,7 @@ def test_delete_dataset_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_dataset(request) # Establish that the response is the type that we expect. 
@@ -13075,6 +13125,7 @@ def test_delete_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -13117,6 +13168,7 @@ def test_import_data_rest_bad_request(request_type=service.ImportDataRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.import_data(request) @@ -13147,6 +13199,7 @@ def test_import_data_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.import_data(request) # Establish that the response is the type that we expect. 
@@ -13184,6 +13237,7 @@ def test_import_data_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -13226,6 +13280,7 @@ def test_export_data_rest_bad_request(request_type=service.ExportDataRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_data(request) @@ -13256,6 +13311,7 @@ def test_export_data_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_data(request) # Establish that the response is the type that we expect. 
@@ -13293,6 +13349,7 @@ def test_export_data_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -13339,6 +13396,7 @@ def test_get_annotation_spec_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_annotation_spec(request) @@ -13378,6 +13436,7 @@ def test_get_annotation_spec_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_annotation_spec(request) # Establish that the response is the type that we expect. 
@@ -13418,6 +13477,7 @@ def test_get_annotation_spec_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = annotation_spec.AnnotationSpec.to_json( annotation_spec.AnnotationSpec() ) @@ -13462,6 +13522,7 @@ def test_create_model_rest_bad_request(request_type=service.CreateModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_model(request) @@ -13594,6 +13655,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_model(request) # Establish that the response is the type that we expect. 
@@ -13631,6 +13693,7 @@ def test_create_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -13673,6 +13736,7 @@ def test_get_model_rest_bad_request(request_type=service.GetModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model(request) @@ -13712,6 +13776,7 @@ def test_get_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model(request) # Establish that the response is the type that we expect. 
@@ -13752,6 +13817,7 @@ def test_get_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = model.Model.to_json(model.Model()) req.return_value.content = return_value @@ -13794,6 +13860,7 @@ def test_list_models_rest_bad_request(request_type=service.ListModelsRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_models(request) @@ -13829,6 +13896,7 @@ def test_list_models_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_models(request) # Establish that the response is the type that we expect. 
@@ -13865,6 +13933,7 @@ def test_list_models_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListModelsResponse.to_json(service.ListModelsResponse()) req.return_value.content = return_value @@ -13907,6 +13976,7 @@ def test_delete_model_rest_bad_request(request_type=service.DeleteModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_model(request) @@ -13937,6 +14007,7 @@ def test_delete_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_model(request) # Establish that the response is the type that we expect. 
@@ -13974,6 +14045,7 @@ def test_delete_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14018,6 +14090,7 @@ def test_update_model_rest_bad_request(request_type=service.UpdateModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_model(request) @@ -14161,6 +14234,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_model(request) # Establish that the response is the type that we expect. 
@@ -14201,6 +14275,7 @@ def test_update_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gca_model.Model.to_json(gca_model.Model()) req.return_value.content = return_value @@ -14243,6 +14318,7 @@ def test_deploy_model_rest_bad_request(request_type=service.DeployModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.deploy_model(request) @@ -14273,6 +14349,7 @@ def test_deploy_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.deploy_model(request) # Establish that the response is the type that we expect. 
@@ -14310,6 +14387,7 @@ def test_deploy_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14352,6 +14430,7 @@ def test_undeploy_model_rest_bad_request(request_type=service.UndeployModelReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undeploy_model(request) @@ -14382,6 +14461,7 @@ def test_undeploy_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undeploy_model(request) # Establish that the response is the type that we expect. 
@@ -14419,6 +14499,7 @@ def test_undeploy_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14461,6 +14542,7 @@ def test_export_model_rest_bad_request(request_type=service.ExportModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_model(request) @@ -14491,6 +14573,7 @@ def test_export_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_model(request) # Establish that the response is the type that we expect. 
@@ -14528,6 +14611,7 @@ def test_export_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14574,6 +14658,7 @@ def test_get_model_evaluation_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model_evaluation(request) @@ -14614,6 +14699,7 @@ def test_get_model_evaluation_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model_evaluation(request) # Establish that the response is the type that we expect. 
@@ -14655,6 +14741,7 @@ def test_get_model_evaluation_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = model_evaluation.ModelEvaluation.to_json( model_evaluation.ModelEvaluation() ) @@ -14701,6 +14788,7 @@ def test_list_model_evaluations_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_model_evaluations(request) @@ -14736,6 +14824,7 @@ def test_list_model_evaluations_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_model_evaluations(request) # Establish that the response is the type that we expect. 
@@ -14774,6 +14863,7 @@ def test_list_model_evaluations_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListModelEvaluationsResponse.to_json( service.ListModelEvaluationsResponse() ) diff --git a/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_prediction_service.py b/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_prediction_service.py index 619c77d51854..25ca1ff6bc95 100644 --- a/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_prediction_service.py +++ b/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_prediction_service.py @@ -1956,6 +1956,7 @@ def test_predict_rest_required_fields(request_type=prediction_service.PredictReq response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.predict(request) @@ -2013,6 +2014,7 @@ def test_predict_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.predict(**mock_args) @@ -2150,6 +2152,7 @@ def test_batch_predict_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_predict(request) @@ -2211,6 +2214,7 @@ def test_batch_predict_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_predict(**mock_args) @@ -2487,6 +2491,7 @@ def 
test_predict_rest_bad_request(request_type=prediction_service.PredictRequest response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.predict(request) @@ -2520,6 +2525,7 @@ def test_predict_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.predict(request) # Establish that the response is the type that we expect. @@ -2559,6 +2565,7 @@ def test_predict_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = prediction_service.PredictResponse.to_json( prediction_service.PredictResponse() ) @@ -2605,6 +2612,7 @@ def test_batch_predict_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_predict(request) @@ -2635,6 +2643,7 @@ def test_batch_predict_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_predict(request) # Establish that the response is the type that we expect. 
@@ -2676,6 +2685,7 @@ def test_batch_predict_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value diff --git a/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py index bf1e2924313b..0b3cbcce8160 100644 --- a/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py +++ b/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py @@ -10368,6 +10368,7 @@ def test_create_dataset_rest_required_fields(request_type=service.CreateDatasetR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_dataset(request) @@ -10426,6 +10427,7 @@ def test_create_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_dataset(**mock_args) @@ -10560,6 +10562,7 @@ def test_get_dataset_rest_required_fields(request_type=service.GetDatasetRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_dataset(request) @@ -10605,6 +10608,7 @@ def test_get_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_dataset(**mock_args) @@ 
-10742,6 +10746,7 @@ def test_list_datasets_rest_required_fields(request_type=service.ListDatasetsReq response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_datasets(request) @@ -10796,6 +10801,7 @@ def test_list_datasets_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_datasets(**mock_args) @@ -10984,6 +10990,7 @@ def test_update_dataset_rest_required_fields(request_type=service.UpdateDatasetR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_dataset(request) @@ -11035,6 +11042,7 @@ def test_update_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_dataset(**mock_args) @@ -11169,6 +11177,7 @@ def test_delete_dataset_rest_required_fields(request_type=service.DeleteDatasetR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_dataset(request) @@ -11212,6 +11221,7 @@ def test_delete_dataset_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_dataset(**mock_args) @@ -11343,6 +11353,7 @@ def 
test_import_data_rest_required_fields(request_type=service.ImportDataRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.import_data(request) @@ -11397,6 +11408,7 @@ def test_import_data_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.import_data(**mock_args) @@ -11531,6 +11543,7 @@ def test_export_data_rest_required_fields(request_type=service.ExportDataRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_data(request) @@ -11587,6 +11600,7 @@ def test_export_data_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_data(**mock_args) @@ -11727,6 +11741,7 @@ def test_get_annotation_spec_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_annotation_spec(request) @@ -11774,6 +11789,7 @@ def test_get_annotation_spec_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_annotation_spec(**mock_args) @@ -11905,6 +11921,7 @@ def test_get_table_spec_rest_required_fields(request_type=service.GetTableSpecRe 
response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_table_spec(request) @@ -11952,6 +11969,7 @@ def test_get_table_spec_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_table_spec(**mock_args) @@ -12094,6 +12112,7 @@ def test_list_table_specs_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_table_specs(request) @@ -12151,6 +12170,7 @@ def test_list_table_specs_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_table_specs(**mock_args) @@ -12345,6 +12365,7 @@ def test_update_table_spec_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table_spec(request) @@ -12394,6 +12415,7 @@ def test_update_table_spec_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_table_spec(**mock_args) @@ -12527,6 +12549,7 @@ def test_get_column_spec_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": 
"value-1", "header-2": "value-2"} response = client.get_column_spec(request) @@ -12574,6 +12597,7 @@ def test_get_column_spec_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_column_spec(**mock_args) @@ -12716,6 +12740,7 @@ def test_list_column_specs_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_column_specs(request) @@ -12773,6 +12798,7 @@ def test_list_column_specs_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_column_specs(**mock_args) @@ -12969,6 +12995,7 @@ def test_update_column_spec_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_column_spec(request) @@ -13018,6 +13045,7 @@ def test_update_column_spec_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_column_spec(**mock_args) @@ -13149,6 +13177,7 @@ def test_create_model_rest_required_fields(request_type=service.CreateModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_model(request) @@ -13205,6 +13234,7 
@@ def test_create_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_model(**mock_args) @@ -13339,6 +13369,7 @@ def test_get_model_rest_required_fields(request_type=service.GetModelRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model(request) @@ -13384,6 +13415,7 @@ def test_get_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model(**mock_args) @@ -13521,6 +13553,7 @@ def test_list_models_rest_required_fields(request_type=service.ListModelsRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_models(request) @@ -13575,6 +13608,7 @@ def test_list_models_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_models(**mock_args) @@ -13766,6 +13800,7 @@ def test_delete_model_rest_required_fields(request_type=service.DeleteModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_model(request) @@ -13809,6 +13844,7 @@ def test_delete_model_rest_flattened(): json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_model(**mock_args) @@ -13940,6 +13976,7 @@ def test_deploy_model_rest_required_fields(request_type=service.DeployModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.deploy_model(request) @@ -13983,6 +14020,7 @@ def test_deploy_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.deploy_model(**mock_args) @@ -14114,6 +14152,7 @@ def test_undeploy_model_rest_required_fields(request_type=service.UndeployModelR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undeploy_model(request) @@ -14157,6 +14196,7 @@ def test_undeploy_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undeploy_model(**mock_args) @@ -14288,6 +14328,7 @@ def test_export_model_rest_required_fields(request_type=service.ExportModelReque response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_model(request) @@ -14344,6 +14385,7 @@ def test_export_model_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = 
json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_model(**mock_args) @@ -14487,6 +14529,7 @@ def test_export_evaluated_examples_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_evaluated_examples(request) @@ -14543,6 +14586,7 @@ def test_export_evaluated_examples_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_evaluated_examples(**mock_args) @@ -14683,6 +14727,7 @@ def test_get_model_evaluation_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model_evaluation(request) @@ -14730,6 +14775,7 @@ def test_get_model_evaluation_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model_evaluation(**mock_args) @@ -14874,6 +14920,7 @@ def test_list_model_evaluations_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_model_evaluations(request) @@ -14928,6 +14975,7 @@ def test_list_model_evaluations_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + 
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_model_evaluations(**mock_args) @@ -16362,6 +16410,7 @@ def test_create_dataset_rest_bad_request(request_type=service.CreateDatasetReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_dataset(request) @@ -16495,6 +16544,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_dataset(request) # Establish that the response is the type that we expect. @@ -16535,6 +16585,7 @@ def test_create_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) req.return_value.content = return_value @@ -16577,6 +16628,7 @@ def test_get_dataset_rest_bad_request(request_type=service.GetDatasetRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_dataset(request) @@ -16616,6 +16668,7 @@ def test_get_dataset_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_dataset(request) # Establish that the response is the type that we expect. 
@@ -16656,6 +16709,7 @@ def test_get_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = dataset.Dataset.to_json(dataset.Dataset()) req.return_value.content = return_value @@ -16698,6 +16752,7 @@ def test_list_datasets_rest_bad_request(request_type=service.ListDatasetsRequest response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_datasets(request) @@ -16733,6 +16788,7 @@ def test_list_datasets_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_datasets(request) # Establish that the response is the type that we expect. 
@@ -16769,6 +16825,7 @@ def test_list_datasets_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListDatasetsResponse.to_json( service.ListDatasetsResponse() ) @@ -16815,6 +16872,7 @@ def test_update_dataset_rest_bad_request(request_type=service.UpdateDatasetReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_dataset(request) @@ -16950,6 +17008,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_dataset(request) # Establish that the response is the type that we expect. 
@@ -16990,6 +17049,7 @@ def test_update_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) req.return_value.content = return_value @@ -17032,6 +17092,7 @@ def test_delete_dataset_rest_bad_request(request_type=service.DeleteDatasetReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_dataset(request) @@ -17062,6 +17123,7 @@ def test_delete_dataset_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_dataset(request) # Establish that the response is the type that we expect. 
@@ -17099,6 +17161,7 @@ def test_delete_dataset_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -17141,6 +17204,7 @@ def test_import_data_rest_bad_request(request_type=service.ImportDataRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.import_data(request) @@ -17171,6 +17235,7 @@ def test_import_data_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.import_data(request) # Establish that the response is the type that we expect. 
@@ -17208,6 +17273,7 @@ def test_import_data_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -17250,6 +17316,7 @@ def test_export_data_rest_bad_request(request_type=service.ExportDataRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_data(request) @@ -17280,6 +17347,7 @@ def test_export_data_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_data(request) # Establish that the response is the type that we expect. 
@@ -17317,6 +17385,7 @@ def test_export_data_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -17363,6 +17432,7 @@ def test_get_annotation_spec_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_annotation_spec(request) @@ -17402,6 +17472,7 @@ def test_get_annotation_spec_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_annotation_spec(request) # Establish that the response is the type that we expect. 
@@ -17442,6 +17513,7 @@ def test_get_annotation_spec_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = annotation_spec.AnnotationSpec.to_json( annotation_spec.AnnotationSpec() ) @@ -17488,6 +17560,7 @@ def test_get_table_spec_rest_bad_request(request_type=service.GetTableSpecReques response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_table_spec(request) @@ -17530,6 +17603,7 @@ def test_get_table_spec_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_table_spec(request) # Establish that the response is the type that we expect. 
@@ -17571,6 +17645,7 @@ def test_get_table_spec_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table_spec.TableSpec.to_json(table_spec.TableSpec()) req.return_value.content = return_value @@ -17613,6 +17688,7 @@ def test_list_table_specs_rest_bad_request(request_type=service.ListTableSpecsRe response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_table_specs(request) @@ -17648,6 +17724,7 @@ def test_list_table_specs_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_table_specs(request) # Establish that the response is the type that we expect. 
@@ -17684,6 +17761,7 @@ def test_list_table_specs_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListTableSpecsResponse.to_json( service.ListTableSpecsResponse() ) @@ -17734,6 +17812,7 @@ def test_update_table_spec_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_table_spec(request) @@ -17862,6 +17941,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table_spec(request) # Establish that the response is the type that we expect. @@ -17903,6 +17983,7 @@ def test_update_table_spec_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gca_table_spec.TableSpec.to_json(gca_table_spec.TableSpec()) req.return_value.content = return_value @@ -17947,6 +18028,7 @@ def test_get_column_spec_rest_bad_request(request_type=service.GetColumnSpecRequ response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_column_spec(request) @@ -17986,6 +18068,7 @@ def test_get_column_spec_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = 
client.get_column_spec(request) # Establish that the response is the type that we expect. @@ -18024,6 +18107,7 @@ def test_get_column_spec_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = column_spec.ColumnSpec.to_json(column_spec.ColumnSpec()) req.return_value.content = return_value @@ -18070,6 +18154,7 @@ def test_list_column_specs_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_column_specs(request) @@ -18107,6 +18192,7 @@ def test_list_column_specs_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_column_specs(request) # Establish that the response is the type that we expect. 
@@ -18143,6 +18229,7 @@ def test_list_column_specs_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListColumnSpecsResponse.to_json( service.ListColumnSpecsResponse() ) @@ -18193,6 +18280,7 @@ def test_update_column_spec_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_column_spec(request) @@ -18339,6 +18427,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_column_spec(request) # Establish that the response is the type that we expect. @@ -18379,6 +18468,7 @@ def test_update_column_spec_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gca_column_spec.ColumnSpec.to_json(gca_column_spec.ColumnSpec()) req.return_value.content = return_value @@ -18421,6 +18511,7 @@ def test_create_model_rest_bad_request(request_type=service.CreateModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_model(request) @@ -18609,6 +18700,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_model(request) # Establish 
that the response is the type that we expect. @@ -18646,6 +18738,7 @@ def test_create_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -18688,6 +18781,7 @@ def test_get_model_rest_bad_request(request_type=service.GetModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model(request) @@ -18726,6 +18820,7 @@ def test_get_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model(request) # Establish that the response is the type that we expect. 
@@ -18765,6 +18860,7 @@ def test_get_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = model.Model.to_json(model.Model()) req.return_value.content = return_value @@ -18807,6 +18903,7 @@ def test_list_models_rest_bad_request(request_type=service.ListModelsRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_models(request) @@ -18842,6 +18939,7 @@ def test_list_models_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_models(request) # Establish that the response is the type that we expect. 
@@ -18878,6 +18976,7 @@ def test_list_models_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListModelsResponse.to_json(service.ListModelsResponse()) req.return_value.content = return_value @@ -18920,6 +19019,7 @@ def test_delete_model_rest_bad_request(request_type=service.DeleteModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_model(request) @@ -18950,6 +19050,7 @@ def test_delete_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_model(request) # Establish that the response is the type that we expect. 
@@ -18987,6 +19088,7 @@ def test_delete_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19029,6 +19131,7 @@ def test_deploy_model_rest_bad_request(request_type=service.DeployModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.deploy_model(request) @@ -19059,6 +19162,7 @@ def test_deploy_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.deploy_model(request) # Establish that the response is the type that we expect. 
@@ -19096,6 +19200,7 @@ def test_deploy_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19138,6 +19243,7 @@ def test_undeploy_model_rest_bad_request(request_type=service.UndeployModelReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undeploy_model(request) @@ -19168,6 +19274,7 @@ def test_undeploy_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undeploy_model(request) # Establish that the response is the type that we expect. 
@@ -19205,6 +19312,7 @@ def test_undeploy_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19247,6 +19355,7 @@ def test_export_model_rest_bad_request(request_type=service.ExportModelRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_model(request) @@ -19277,6 +19386,7 @@ def test_export_model_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_model(request) # Establish that the response is the type that we expect. 
@@ -19314,6 +19424,7 @@ def test_export_model_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19358,6 +19469,7 @@ def test_export_evaluated_examples_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.export_evaluated_examples(request) @@ -19388,6 +19500,7 @@ def test_export_evaluated_examples_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.export_evaluated_examples(request) # Establish that the response is the type that we expect. 
@@ -19427,6 +19540,7 @@ def test_export_evaluated_examples_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19473,6 +19587,7 @@ def test_get_model_evaluation_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_model_evaluation(request) @@ -19513,6 +19628,7 @@ def test_get_model_evaluation_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_model_evaluation(request) # Establish that the response is the type that we expect. 
@@ -19554,6 +19670,7 @@ def test_get_model_evaluation_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = model_evaluation.ModelEvaluation.to_json( model_evaluation.ModelEvaluation() ) @@ -19600,6 +19717,7 @@ def test_list_model_evaluations_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_model_evaluations(request) @@ -19635,6 +19753,7 @@ def test_list_model_evaluations_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_model_evaluations(request) # Establish that the response is the type that we expect. 
@@ -19673,6 +19792,7 @@ def test_list_model_evaluations_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = service.ListModelEvaluationsResponse.to_json( service.ListModelEvaluationsResponse() ) diff --git a/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_prediction_service.py index 16fc61240b14..7b578a53cecd 100644 --- a/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_prediction_service.py +++ b/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_prediction_service.py @@ -1957,6 +1957,7 @@ def test_predict_rest_required_fields(request_type=prediction_service.PredictReq response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.predict(request) @@ -2014,6 +2015,7 @@ def test_predict_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.predict(**mock_args) @@ -2151,6 +2153,7 @@ def test_batch_predict_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_predict(request) @@ -2213,6 +2216,7 @@ def test_batch_predict_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_predict(**mock_args) @@ -2489,6 
+2493,7 @@ def test_predict_rest_bad_request(request_type=prediction_service.PredictRequest response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.predict(request) @@ -2522,6 +2527,7 @@ def test_predict_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.predict(request) # Establish that the response is the type that we expect. @@ -2561,6 +2567,7 @@ def test_predict_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = prediction_service.PredictResponse.to_json( prediction_service.PredictResponse() ) @@ -2607,6 +2614,7 @@ def test_batch_predict_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_predict(request) @@ -2637,6 +2645,7 @@ def test_batch_predict_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_predict(request) # Establish that the response is the type that we expect. 
@@ -2678,6 +2687,7 @@ def test_batch_predict_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value